code
stringlengths
86
54.5k
code_codestyle
int64
0
371
style_context
stringlengths
87
49.2k
style_context_codestyle
int64
0
349
label
int64
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : Optional[Any] ) -> List[str]: __snake_case : Dict = [] __snake_case : Any = set({'(', '[', '{'} ) __snake_case : List[Any] = set({')', ']', '}'} ) __snake_case : List[Any] = {'{': '}', '[': ']', '(': ')'} for i in range(len(_UpperCAmelCase ) ): if s[i] in open_brackets: stack.append(s[i] ) elif s[i] in closed_brackets and ( len(_UpperCAmelCase ) == 0 or (len(_UpperCAmelCase ) > 0 and open_to_closed[stack.pop()] != s[i]) ): return False return len(_UpperCAmelCase ) == 0 def a_ ( ) -> Optional[int]: __snake_case : Any = input('Enter sequence of brackets: ' ) if is_balanced(_UpperCAmelCase ): print(_UpperCAmelCase ,'is balanced' ) else: print(_UpperCAmelCase ,'is not balanced' ) if __name__ == "__main__": main()
0
'''simple docstring''' from __future__ import annotations A__ : str = '''Muhammad Umer Farooq''' A__ : int = '''MIT''' A__ : Optional[int] = '''1.0.0''' A__ : List[Any] = '''Muhammad Umer Farooq''' A__ : Optional[Any] = '''[email protected]''' A__ : Optional[Any] = '''Alpha''' import re from html.parser import HTMLParser from urllib import parse import requests class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Union[str, Any] , __a : str ) -> None: '''simple docstring''' super().__init__() __snake_case : list[str] = [] __snake_case : Dict = domain def A_ ( self : Dict , __a : str , __a : list[tuple[str, str | None]] ) -> None: '''simple docstring''' # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: __snake_case : Optional[Any] = parse.urljoin(self.domain , __a ) self.urls.append(__a ) def a_ ( _UpperCAmelCase : str ) -> str: return ".".join(get_sub_domain_name(_UpperCAmelCase ).split('.' )[-2:] ) def a_ ( _UpperCAmelCase : str ) -> str: return parse.urlparse(_UpperCAmelCase ).netloc def a_ ( _UpperCAmelCase : str = "https://github.com" ) -> list[str]: __snake_case : List[Any] = get_domain_name(_UpperCAmelCase ) # Initialize the parser __snake_case : Tuple = Parser(_UpperCAmelCase ) try: # Open URL __snake_case : Any = requests.get(_UpperCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through __snake_case : Dict = set() for link in parser.urls: # open URL. # read = requests.get(link) try: __snake_case : List[Any] = requests.get(_UpperCAmelCase ) # Get the valid email. __snake_case : Optional[Any] = re.findall('[a-zA-Z0-9]+@' + domain ,read.text ) # If not in list then append it. for email in emails: valid_emails.add(_UpperCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(_UpperCAmelCase ) if __name__ == "__main__": A__ : Tuple = emails_from_url('''https://github.com''') print(F"""{len(emails)} emails found:""") print('''\n'''.join(sorted(emails)))
0
1
'''simple docstring''' A__ : Any = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A__ : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] A__ : Tuple = { 0: '''Sunday''', 1: '''Monday''', 2: '''Tuesday''', 3: '''Wednesday''', 4: '''Thursday''', 5: '''Friday''', 6: '''Saturday''', } def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> str: assert len(str(_UpperCAmelCase ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: __snake_case : str = year // 1_00 __snake_case : Tuple = (5 * (century % 4) + 2) % 7 __snake_case : Any = year % 1_00 __snake_case : Optional[int] = centurian % 12 __snake_case : List[Any] = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 __snake_case : Optional[int] = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 4_00) == 0) else DOOMSDAY_LEAP[month - 1] ) __snake_case : Dict = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
0
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) A__ : Dict = logging.getLogger() def a_ ( ) -> Tuple: __snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument('-f' ) __snake_case : Any = parser.parse_args() return args.f def a_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]: __snake_case : Tuple = {} __snake_case : Union[str, Any] = os.path.join(_UpperCAmelCase ,'all_results.json' ) if os.path.exists(_UpperCAmelCase ): with open(_UpperCAmelCase ,'r' ) as f: __snake_case : List[str] = json.load(_UpperCAmelCase ) else: raise ValueError(f'''can\'t find {path}''' ) return results def a_ ( ) -> Union[str, Any]: __snake_case : Union[str, Any] = torch.cuda.is_available() and torch_device == 'cuda' return is_using_cuda and is_apex_available() A__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @classmethod def A_ ( cls : Any ) -> List[str]: '''simple docstring''' # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __snake_case : Optional[int] = tempfile.mkdtemp() __snake_case : Dict = os.path.join(cls.tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) __snake_case : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def A_ ( cls : List[str] ) -> List[str]: '''simple docstring''' shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : List[Any] = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'glue_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertLess(result['perplexity'] , 100 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'clm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : int = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertLess(result['perplexity'] , 42 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'mlm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __snake_case : Any = 7 if get_gpu_count() > 1 else 2 __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertLess(result['train_loss'] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'ner_no_trainer' ) ) ) @unittest.skip(reason='Fix me @muellerzr' ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : Tuple = f''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['eval_f1'] , 28 ) self.assertGreaterEqual(result['eval_exact'] , 28 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'qa_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : str = self.get_auto_remove_tmp_dir() __snake_case : Any = f''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__a , 'swag_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : int = get_results(__a ) self.assertGreaterEqual(result['eval_rouge1'] , 10 ) self.assertGreaterEqual(result['eval_rouge2'] , 2 ) self.assertGreaterEqual(result['eval_rougeL'] , 7 ) self.assertGreaterEqual(result['eval_rougeLsum'] , 7 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_bleu'] , 30 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) ) @slow def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) __snake_case : List[str] = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Dict = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : Optional[int] = get_results(__a ) # The base model scores a 25% self.assertGreaterEqual(result['eval_accuracy'] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
0
1
'''simple docstring''' from torch import nn def a_ ( _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(f'''Unsupported activation function: {act_fn}''' )
0
'''simple docstring''' import math def a_ ( _UpperCAmelCase : int ) -> list: __snake_case : Optional[Any] = [True] * n __snake_case : Optional[int] = False __snake_case : Dict = False __snake_case : List[Any] = True for i in range(3 ,int(n**0.5 + 1 ) ,2 ): __snake_case : Optional[int] = i * 2 while index < n: __snake_case : Union[str, Any] = False __snake_case : int = index + i __snake_case : Dict = [2] for i in range(3 ,_UpperCAmelCase ,2 ): if is_prime[i]: primes.append(_UpperCAmelCase ) return primes def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int: __snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00 __snake_case : Tuple = prime_sieve(_UpperCAmelCase ) __snake_case : List[Any] = 0 __snake_case : List[Any] = 0 __snake_case : Optional[int] = primes[prime_index] while (last_prime**2) <= limit: __snake_case : Optional[int] = primes[prime_index + 1] __snake_case : Union[str, Any] = last_prime**2 __snake_case : Dict = next_prime**2 # Get numbers divisible by lps(current) __snake_case : Optional[Any] = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) __snake_case : Optional[Any] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps __snake_case : List[str] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair __snake_case : Dict = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
0
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) A__ : Tuple = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : List[Any] = ['''PLBartTokenizer'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ '''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''PLBartForCausalLM''', '''PLBartForConditionalGeneration''', '''PLBartForSequenceClassification''', '''PLBartModel''', '''PLBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_plbart import PLBartTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_plbart import ( PLBART_PRETRAINED_MODEL_ARCHIVE_LIST, PLBartForCausalLM, PLBartForConditionalGeneration, PLBartForSequenceClassification, PLBartModel, PLBartPreTrainedModel, ) else: import sys A__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
0
'''simple docstring''' def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float: return price * (1 + tax_rate) if __name__ == "__main__": print(F"""{price_plus_tax(1_0_0, 0.25) = }""") print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> list[int]: if num <= 0: raise ValueError('Input must be a positive integer' ) __snake_case : Tuple = [True] * (num + 1) __snake_case : Tuple = 2 while p * p <= num: if primes[p]: for i in range(p * p ,num + 1 ,_UpperCAmelCase ): __snake_case : str = False p += 1 return [prime for prime in range(2 ,num + 1 ) if primes[prime]] if __name__ == "__main__": import doctest doctest.testmod() A__ : Any = int(input('''Enter a positive integer: ''').strip()) print(prime_sieve_eratosthenes(user_num))
0
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : List[Any] ) -> int: '''simple docstring''' __snake_case : Optional[int] = SMALL_MODEL_IDENTIFIER __snake_case : str = 'pt' __snake_case : Union[str, Any] = 'tf' def A_ ( self : Dict , __a : Tuple ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(__a ) def A_ ( self : Any , __a : Optional[Any] ) -> Dict: '''simple docstring''' __snake_case : Union[str, Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__a ) model_tf.save_pretrained(__a ) def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Tuple = 'mock_framework' # Framework provided - return whatever the user provides __snake_case : int = FeaturesManager.determine_framework(self.test_model , __a ) self.assertEqual(__a , __a ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__a ) __snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a ) self.assertEqual(__a , __a ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__a ) __snake_case : Tuple = FeaturesManager.determine_framework(__a , __a ) self.assertEqual(__a , __a ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__a ) __snake_case : Tuple = FeaturesManager.determine_framework(__a ) self.assertEqual(__a , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__a ) __snake_case : Union[str, Any] = FeaturesManager.determine_framework(__a ) self.assertEqual(__a , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(__a ): __snake_case : Optional[int] = FeaturesManager.determine_framework(__a ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Union[str, Any] = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ): __snake_case : int = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_pt ) # PyTorch not in environment -> use TensorFlow __snake_case : Tuple = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_torch_available' , __a ): __snake_case : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_tf ) # Both in environment -> use PyTorch __snake_case : Optional[Any] = MagicMock(return_value=__a ) __snake_case : Tuple = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ), patch( 'transformers.onnx.features.is_torch_available' , __a ): __snake_case : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_pt ) # Both not in environment -> raise error __snake_case : str = MagicMock(return_value=__a ) __snake_case : List[Any] = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ), patch( 'transformers.onnx.features.is_torch_available' , __a ): with self.assertRaises(__a ): __snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
0
1
'''simple docstring''' from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def a_ ( _UpperCAmelCase : str = "laptop" ) -> DataFrame: __snake_case : List[Any] = f'''https://www.amazon.in/laptop/s?k={product}''' __snake_case : Optional[Any] = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36', 'Accept-Language': 'en-US, en;q=0.5', } __snake_case : List[str] = BeautifulSoup(requests.get(_UpperCAmelCase ,headers=_UpperCAmelCase ).text ) # Initialize a Pandas dataframe with the column titles __snake_case : Optional[int] = DataFrame( columns=[ 'Product Title', 'Product Link', 'Current Price of the product', 'Product Rating', 'MRP of the product', 'Discount', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( 'div' ,attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} ,) ,soup.find_all('div' ,attrs={'class': 'a-row a-size-base a-color-base'} ) ,): try: __snake_case : List[Any] = item.ha.text __snake_case : Optional[int] = 'https://www.amazon.in/' + item.ha.a['href'] __snake_case : Optional[Any] = item.find('span' ,attrs={'class': 'a-offscreen'} ).text try: __snake_case : Any = item.find('span' ,attrs={'class': 'a-icon-alt'} ).text except AttributeError: __snake_case : Optional[Any] = 'Not available' try: __snake_case : Any = ( '₹' + item.find( 'span' ,attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1] ) except AttributeError: __snake_case : Optional[Any] = '' try: __snake_case : List[str] = float( ( ( float(product_mrp.strip('₹' ).replace(',' ,'' ) ) - float(product_price.strip('₹' ).replace(',' ,'' ) ) ) / float(product_mrp.strip('₹' ).replace(',' ,'' ) ) ) * 1_00 ) except ValueError: __snake_case : Optional[Any] = float('nan' ) except AttributeError: pass __snake_case : Tuple = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] __snake_case : Any = ' ' __snake_case : Tuple = ' ' data_frame.index += 1 return data_frame if __name__ == "__main__": A__ : List[Any] = '''headphones''' get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
0
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ProphetNetTokenizer A__ = False def A_ ( self : Optional[int] ) -> Dict: '''simple docstring''' super().setUp() __snake_case : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : int , __a : Union[str, Any] ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = 'UNwant\u00E9d,running' __snake_case : List[str] = 'unwanted, running' return input_text, output_text def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Dict = self.tokenizer_class(self.vocab_file ) __snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def A_ ( self : int ) -> Any: '''simple docstring''' __snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Any ) -> List[str]: '''simple docstring''' __snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Optional[int] ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def A_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' __snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __snake_case : List[Any] = {} for i, token in enumerate(__a ): __snake_case : List[str] = i __snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def A_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' __snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] __snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' ) self.assertIsInstance(__a , __a ) __snake_case : int = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__a , __a ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def A_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a ) __snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a ) __snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a ) __snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
0
1
'''simple docstring''' import argparse import os import torch from transformers import FlavaImageCodebook, FlavaImageCodebookConfig def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Tuple ) -> int: __snake_case : int = s.rsplit(_UpperCAmelCase ,_UpperCAmelCase ) return new.join(_UpperCAmelCase ) def a_ ( _UpperCAmelCase : Union[str, Any] ) -> Optional[Any]: # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() ) def a_ ( _UpperCAmelCase : Tuple ) -> int: __snake_case : Union[str, Any] = {} __snake_case : str = ['group_1', 'group_2', 'group_3', 'group_4'] for key, value in state_dict.items(): for group_key in group_keys: if group_key in key: __snake_case : Tuple = key.replace(f'''{group_key}.''' ,f'''{group_key}.group.''' ) if "res_path" in key: __snake_case : Optional[int] = key.replace('res_path.' ,'res_path.path.' ) if key.endswith('.w' ): __snake_case : Dict = rreplace(_UpperCAmelCase ,'.w' ,'.weight' ,1 ) if key.endswith('.b' ): __snake_case : List[Any] = rreplace(_UpperCAmelCase ,'.b' ,'.bias' ,1 ) __snake_case : List[str] = value.float() return upgrade @torch.no_grad() def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[int]=None ,_UpperCAmelCase : List[str]=True ) -> List[Any]: from dall_e import Encoder __snake_case : List[str] = Encoder() if os.path.exists(_UpperCAmelCase ): __snake_case : str = torch.load(_UpperCAmelCase ) else: __snake_case : Any = torch.hub.load_state_dict_from_url(_UpperCAmelCase ) if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): __snake_case : Union[str, Any] = ckpt.state_dict() encoder.load_state_dict(_UpperCAmelCase ) if config_path is not None: __snake_case : Union[str, Any] = FlavaImageCodebookConfig.from_pretrained(_UpperCAmelCase ) else: __snake_case : Tuple = FlavaImageCodebookConfig() __snake_case : str = FlavaImageCodebook(_UpperCAmelCase ).eval() __snake_case : int = encoder.state_dict() __snake_case : Dict = upgrade_state_dict(_UpperCAmelCase ) hf_model.load_state_dict(_UpperCAmelCase ) __snake_case : Optional[Any] = hf_model.state_dict() __snake_case : Union[str, Any] = count_parameters(_UpperCAmelCase ) __snake_case : List[str] = count_parameters(_UpperCAmelCase ) assert torch.allclose(_UpperCAmelCase ,_UpperCAmelCase ,atol=1E-3 ) if save_checkpoint: hf_model.save_pretrained(_UpperCAmelCase ) else: return hf_state_dict if __name__ == "__main__": A__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') A__ : Union[str, Any] = parser.parse_args() convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : Optional[Any] = { '''configuration_nllb_moe''': [ '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NllbMoeConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NllbMoeForConditionalGeneration''', '''NllbMoeModel''', '''NllbMoePreTrainedModel''', '''NllbMoeTop2Router''', '''NllbMoeSparseMLP''', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
1
'''simple docstring''' from __future__ import annotations import matplotlib.pyplot as plt # type: ignore import numpy # initial triangle of Koch snowflake A__ : Dict = numpy.array([0, 0]) A__ : Tuple = numpy.array([0.5, 0.8_66_02_54]) A__ : Any = numpy.array([1, 0]) A__ : Dict = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1] def a_ ( _UpperCAmelCase : list[numpy.ndarray] ,_UpperCAmelCase : int ) -> list[numpy.ndarray]: __snake_case : Tuple = initial_vectors for _ in range(_UpperCAmelCase ): __snake_case : Tuple = iteration_step(_UpperCAmelCase ) return vectors def a_ ( _UpperCAmelCase : list[numpy.ndarray] ) -> list[numpy.ndarray]: __snake_case : List[Any] = [] for i, start_vector in enumerate(vectors[:-1] ): __snake_case : Union[str, Any] = vectors[i + 1] new_vectors.append(_UpperCAmelCase ) __snake_case : Union[str, Any] = end_vector - start_vector new_vectors.append(start_vector + difference_vector / 3 ) new_vectors.append( start_vector + difference_vector / 3 + rotate(difference_vector / 3 ,60 ) ) new_vectors.append(start_vector + difference_vector * 2 / 3 ) new_vectors.append(vectors[-1] ) return new_vectors def a_ ( _UpperCAmelCase : numpy.ndarray ,_UpperCAmelCase : float ) -> numpy.ndarray: __snake_case : int = numpy.radians(_UpperCAmelCase ) __snake_case , __snake_case : Tuple = numpy.cos(_UpperCAmelCase ), numpy.sin(_UpperCAmelCase ) __snake_case : Union[str, Any] = numpy.array(((c, -s), (s, c)) ) return numpy.dot(_UpperCAmelCase ,_UpperCAmelCase ) def a_ ( _UpperCAmelCase : list[numpy.ndarray] ) -> None: __snake_case : List[str] = plt.gca() axes.set_aspect('equal' ) # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all # y-coordinates as inputs, which are constructed from the vector-list using # zip() __snake_case , __snake_case : List[Any] = zip(*_UpperCAmelCase ) plt.plot(_UpperCAmelCase ,_UpperCAmelCase ) plt.show() if __name__ == "__main__": import doctest doctest.testmod() A__ : Optional[Any] = iterate(INITIAL_VECTORS, 5) plot(processed_vectors)
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('The given input must be positive' ) # get the generated string sequence __snake_case : Optional[Any] = gray_code_sequence_string(_UpperCAmelCase ) # # convert them to integers for i in range(len(_UpperCAmelCase ) ): __snake_case : Optional[Any] = int(sequence[i] ,2 ) return sequence def a_ ( _UpperCAmelCase : int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __snake_case : Dict = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __snake_case : Dict = gray_code_sequence_string(bit_count - 1 ) __snake_case : Any = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __snake_case : str = '0' + smaller_sequence[i] sequence.append(_UpperCAmelCase ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __snake_case : Any = '1' + smaller_sequence[i] sequence.append(_UpperCAmelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
0
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_owlvit import OwlViTImageProcessor A__ : Dict = logging.get_logger(__name__) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Tuple , *__a : List[Any] , **__a : Any ) -> None: '''simple docstring''' warnings.warn( 'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use OwlViTImageProcessor instead.' , __a , ) super().__init__(*__a , **__a )
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class snake_case__ ( unittest.TestCase ): def A_ ( self : int ) -> List[Any]: '''simple docstring''' __snake_case : Any = tempfile.mkdtemp() # fmt: off __snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) __snake_case : List[str] = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } __snake_case : Optional[Any] = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__a , __a ) def A_ ( self : Optional[int] , **__a : Dict ) -> int: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : int , **__a : Dict ) -> Tuple: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __snake_case : List[str] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : Dict = self.get_image_processor() __snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) processor.save_pretrained(self.tmpdirname ) __snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : Optional[Any] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 ) __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = self.prepare_image_inputs() __snake_case : List[str] = image_processor(__a , return_tensors='np' ) __snake_case : List[str] = processor(images=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Optional[int] = 'lower newer' __snake_case : Dict = processor(text=__a ) __snake_case : List[Any] = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : List[Any] = 'lower newer' __snake_case : Optional[Any] = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(__a ): processor() def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Union[str, Any] = self.get_image_processor() __snake_case : Any = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __snake_case : int = processor.batch_decode(__a ) __snake_case : Optional[Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def A_ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = self.get_image_processor() __snake_case : Dict = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Union[str, Any] = 'lower newer' __snake_case : Tuple = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
0
1
'''simple docstring''' from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax.numpy as jnp from jax import random from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .scheduling_utils_flax import FlaxSchedulerMixin @flax.struct.dataclass class snake_case__ : # setable values A__ = None A__ = None A__ = None # sigma(t_i) @classmethod def A_ ( cls : Optional[int] ) -> List[str]: '''simple docstring''' return cls() @dataclass class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = 42 A__ = 42 A__ = 42 class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): @property def A_ ( self : Dict ) -> List[str]: '''simple docstring''' return True @register_to_config def __init__( self : Any , __a : float = 0.0_2 , __a : float = 100 , __a : float = 1.0_0_7 , __a : float = 80 , __a : float = 0.0_5 , __a : float = 50 , ) -> Dict: '''simple docstring''' pass def A_ ( self : int ) -> Union[str, Any]: '''simple docstring''' return KarrasVeSchedulerState.create() def A_ ( self : Dict , __a : KarrasVeSchedulerState , __a : int , __a : Tuple = () ) -> KarrasVeSchedulerState: '''simple docstring''' __snake_case : Dict = jnp.arange(0 , __a )[::-1].copy() __snake_case : List[Any] = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in timesteps ] return state.replace( num_inference_steps=__a , schedule=jnp.array(__a , dtype=jnp.floataa ) , timesteps=__a , ) def A_ ( self : List[Any] , __a : KarrasVeSchedulerState , __a : jnp.ndarray , __a : float , __a : random.KeyArray , ) -> Tuple[jnp.ndarray, float]: '''simple docstring''' if self.config.s_min <= sigma <= self.config.s_max: __snake_case : Union[str, Any] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 ) else: __snake_case : Optional[Any] = 0 # sample eps ~ N(0, S_noise^2 * I) __snake_case : Optional[int] = random.split(__a , num=1 ) __snake_case : List[str] = self.config.s_noise * random.normal(key=__a , shape=sample.shape ) __snake_case : Optional[Any] = sigma + gamma * sigma __snake_case : Dict = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def A_ ( self : List[str] , __a : KarrasVeSchedulerState , __a : jnp.ndarray , __a : float , __a : float , __a : jnp.ndarray , __a : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: '''simple docstring''' __snake_case : Union[str, Any] = sample_hat + sigma_hat * model_output __snake_case : str = (sample_hat - pred_original_sample) / sigma_hat __snake_case : int = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__a , derivative=__a , state=__a ) def A_ ( self : Tuple , __a : KarrasVeSchedulerState , __a : jnp.ndarray , __a : float , __a : float , __a : jnp.ndarray , __a : jnp.ndarray , __a : jnp.ndarray , __a : bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]: '''simple docstring''' __snake_case : Union[str, Any] = sample_prev + sigma_prev * model_output __snake_case : Optional[int] = (sample_prev - pred_original_sample) / sigma_prev __snake_case : Tuple = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative, state) return FlaxKarrasVeOutput(prev_sample=__a , derivative=__a , state=__a ) def A_ ( self : Dict , __a : KarrasVeSchedulerState , __a : Any , __a : Dict , __a : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' raise NotImplementedError()
0
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def a_ ( _UpperCAmelCase : List[Any] ) -> Tuple: __snake_case : str = [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ) -> List[str]: __snake_case : Tuple = [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def a_ ( _UpperCAmelCase : Union[str, Any] ) -> Dict: __snake_case : Union[str, Any] = [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def a_ ( ) -> Optional[Any]: __snake_case : Any = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ) -> Tuple: __snake_case : List[str] = 'imagenet-1k-id2label.json' __snake_case : Dict = 10_00 __snake_case : Union[str, Any] = 'huggingface/label-files' __snake_case : str = num_labels __snake_case : str = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ) ,'r' ) ) __snake_case : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __snake_case : Optional[Any] = idalabel __snake_case : str = {v: k for k, v in idalabel.items()} __snake_case : Dict = CvtConfig(num_labels=_UpperCAmelCase ,idalabel=_UpperCAmelCase ,labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' ,1 )[-1][4:6] == "13": __snake_case : Tuple = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' ,1 )[-1][4:6] == "21": __snake_case : str = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __snake_case : Dict = [2, 2, 20] __snake_case : Any = [3, 12, 16] __snake_case : Tuple = [1_92, 7_68, 10_24] __snake_case : str = CvtForImageClassification(_UpperCAmelCase ) __snake_case : List[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) __snake_case : int = image_size __snake_case : int = torch.load(_UpperCAmelCase ,map_location=torch.device('cpu' ) ) __snake_case : List[Any] = OrderedDict() __snake_case : Union[str, Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __snake_case : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase ) __snake_case : Tuple = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __snake_case : Optional[int] = list_of_state_dict + attention(_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : str = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __snake_case : List[str] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": A__ : Dict = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=3_8_4, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) A__ : Tuple = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
0
1
'''simple docstring''' import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset A__ : Optional[int] = '''bert-base-cased''' A__ : Optional[Any] = '''google/pegasus-xsum''' A__ : List[Any] = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] A__ : str = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] A__ : str = '''patrickvonplaten/t5-tiny-random''' A__ : Union[str, Any] = '''sshleifer/bart-tiny-random''' A__ : List[Any] = '''sshleifer/tiny-mbart''' A__ : Dict = '''sshleifer/tiny-marian-en-de''' def a_ ( _UpperCAmelCase : Path ,_UpperCAmelCase : list ) -> List[str]: __snake_case : Tuple = '\n'.join(_UpperCAmelCase ) Path(_UpperCAmelCase ).open('w' ).writelines(_UpperCAmelCase ) def a_ ( _UpperCAmelCase : str ) -> Any: for split in ["train", "val", "test"]: _dump_articles(os.path.join(_UpperCAmelCase ,f'''{split}.source''' ) ,_UpperCAmelCase ) _dump_articles(os.path.join(_UpperCAmelCase ,f'''{split}.target''' ) ,_UpperCAmelCase ) return tmp_dir class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def A_ ( self : str , __a : Tuple ) -> str: '''simple docstring''' __snake_case : List[str] = AutoTokenizer.from_pretrained(__a ) __snake_case : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) __snake_case : int = max(len(tokenizer.encode(__a ) ) for a in ARTICLES ) __snake_case : Dict = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES ) __snake_case : Any = 4 __snake_case : Optional[Any] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated __snake_case , __snake_case : Optional[int] = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. __snake_case : Any = SeqaSeqDataset( __a , data_dir=__a , type_path='train' , max_source_length=__a , max_target_length=__a , src_lang=__a , tgt_lang=__a , ) __snake_case : Union[str, Any] = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(__a , __a ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place __snake_case : Tuple = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def A_ ( self : Optional[Any] , __a : Union[str, Any] ) -> int: '''simple docstring''' __snake_case : int = AutoTokenizer.from_pretrained(__a ) __snake_case : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) __snake_case : str = max(len(tokenizer.encode(__a ) ) for a in ARTICLES ) __snake_case : Union[str, Any] = max(len(tokenizer.encode(__a ) ) for a in SUMMARIES ) __snake_case : Any = 4 __snake_case : Dict = LegacySeqaSeqDataset( __a , data_dir=__a , type_path='train' , max_source_length=20 , max_target_length=__a , ) __snake_case : str = DataLoader(__a , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def A_ ( self : List[Any] ) -> str: '''simple docstring''' __snake_case : str = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' ) __snake_case : Dict = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) __snake_case : Any = tmp_dir.joinpath('train.source' ).open().readlines() __snake_case : List[str] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(__a , __a , 128 , __a ) __snake_case : int = {x.name for x in tmp_dir.iterdir()} __snake_case : int = {x.name for x in save_dir.iterdir()} __snake_case : Dict = save_dir.joinpath('train.source' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(__a ) < len(__a ) assert len(__a ) == 1 assert len(packed_examples[0] ) == sum(len(__a ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' ) def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' if not FAIRSEQ_AVAILABLE: return __snake_case , __snake_case , __snake_case : str = self._get_dataset(max_len=64 ) __snake_case : Union[str, Any] = 64 __snake_case : Union[str, Any] = ds.make_dynamic_sampler(__a , required_batch_size_multiple=__a ) __snake_case : Any = [len(__a ) for x in batch_sampler] assert len(set(__a ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(__a ) == len(__a ) # no dropped or added examples __snake_case : Union[str, Any] = DataLoader(__a , batch_sampler=__a , collate_fn=ds.collate_fn , num_workers=2 ) __snake_case : Optional[Any] = [] __snake_case : Dict = [] for batch in data_loader: __snake_case : int = batch['input_ids'].shape __snake_case : List[Any] = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple __snake_case : Tuple = np.product(batch['input_ids'].shape ) num_src_per_batch.append(__a ) if num_src_tokens > (max_tokens * 1.1): failures.append(__a ) assert num_src_per_batch[0] == max(__a ) if failures: raise AssertionError(f'''too many tokens in {len(__a )} batches''' ) def A_ ( self : Tuple ) -> Dict: '''simple docstring''' __snake_case , __snake_case , __snake_case : Union[str, Any] = self._get_dataset(max_len=512 ) __snake_case : Any = 2 __snake_case : Dict = ds.make_sortish_sampler(__a , shuffle=__a ) __snake_case : List[Any] = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 ) __snake_case : int = DataLoader(__a , batch_size=__a , collate_fn=ds.collate_fn , num_workers=2 , sampler=__a ) __snake_case : Any = tokenizer.pad_token_id def count_pad_tokens(__a : Tuple , __a : Optional[int]="input_ids" ): return [batch[k].eq(__a ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(__a , k='labels' ) ) < sum(count_pad_tokens(__a , k='labels' ) ) assert sum(count_pad_tokens(__a ) ) < sum(count_pad_tokens(__a ) ) assert len(__a ) == len(__a ) def A_ ( self : Optional[Any] , __a : int=1000 , __a : int=128 ) -> str: '''simple docstring''' if os.getenv('USE_REAL_DATA' , __a ): __snake_case : str = 'examples/seq2seq/wmt_en_ro' __snake_case : Union[str, Any] = max_len * 2 * 64 if not Path(__a ).joinpath('train.len' ).exists(): save_len_file(__a , __a ) else: __snake_case : List[Any] = 'examples/seq2seq/test_data/wmt_en_ro' __snake_case : List[Any] = max_len * 4 save_len_file(__a , __a ) __snake_case : Optional[Any] = AutoTokenizer.from_pretrained(__a ) __snake_case : Optional[Any] = SeqaSeqDataset( __a , data_dir=__a , type_path='train' , max_source_length=__a , max_target_length=__a , n_obs=__a , ) return ds, max_tokens, tokenizer def A_ ( self : Any ) -> Dict: '''simple docstring''' __snake_case , __snake_case , __snake_case : List[str] = self._get_dataset() __snake_case : Dict = set(DistributedSortishSampler(__a , 256 , num_replicas=2 , rank=0 , add_extra_examples=__a ) ) __snake_case : str = set(DistributedSortishSampler(__a , 256 , num_replicas=2 , rank=1 , add_extra_examples=__a ) ) assert idsa.intersection(__a ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def A_ ( self : int , __a : Optional[int] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Any = AutoTokenizer.from_pretrained(__a , use_fast=__a ) if tok_name == MBART_TINY: __snake_case : Union[str, Any] = SeqaSeqDataset( __a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , ) __snake_case : List[Any] = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: __snake_case : Optional[Any] = SeqaSeqDataset( __a , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , ) __snake_case : Dict = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(__a ) == 1 if tok_name == BART_TINY else len(__a ) == 0
0
'''simple docstring''' from __future__ import annotations A__ : List[Any] = list[list[int]] # assigning initial values to the grid A__ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution A__ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( _UpperCAmelCase : Matrix ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( _UpperCAmelCase : Matrix ) -> Matrix | None: if location := find_empty_location(_UpperCAmelCase ): __snake_case , __snake_case : Optional[int] = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 ,10 ): if is_safe(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ): __snake_case : Union[str, Any] = digit if sudoku(_UpperCAmelCase ) is not None: return grid __snake_case : Optional[Any] = 0 return None def a_ ( _UpperCAmelCase : Matrix ) -> None: for row in grid: for cell in row: print(_UpperCAmelCase ,end=' ' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 2_0) print_solution(example_grid) print('''\nExample grid solution:''') A__ : List[str] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
0
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : Any = { '''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = [ '''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MegatronBertForCausalLM''', '''MegatronBertForMaskedLM''', '''MegatronBertForMultipleChoice''', '''MegatronBertForNextSentencePrediction''', '''MegatronBertForPreTraining''', '''MegatronBertForQuestionAnswering''', '''MegatronBertForSequenceClassification''', '''MegatronBertForTokenClassification''', '''MegatronBertModel''', '''MegatronBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys A__ : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = KandinskyVaaPriorPipeline A__ = ['''prompt'''] A__ = ['''prompt''', '''negative_prompt'''] A__ = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Dict ) -> List[str]: '''simple docstring''' return 32 @property def A_ ( self : Any ) -> str: '''simple docstring''' return 32 @property def A_ ( self : str ) -> Optional[int]: '''simple docstring''' return self.time_input_dim @property def A_ ( self : str ) -> int: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return 100 @property def A_ ( self : Tuple ) -> List[str]: '''simple docstring''' __snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__a ) @property def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Any = { 'num_attention_heads': 2, 'attention_head_dim': 12, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } __snake_case : List[Any] = PriorTransformer(**__a ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __snake_case : Any = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __snake_case : Optional[Any] = CLIPVisionModelWithProjection(__a ) return model @property def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : Dict = CLIPImageProcessor( crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = self.dummy_prior __snake_case : List[str] = self.dummy_image_encoder __snake_case : str = self.dummy_text_encoder __snake_case : List[str] = self.dummy_tokenizer __snake_case : List[str] = self.dummy_image_processor __snake_case : Any = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=1_0.0 , ) __snake_case : str = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def A_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple=0 ) -> Any: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : List[str] = torch.manual_seed(__a ) else: __snake_case : List[str] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : List[Any] = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def A_ ( self : str ) -> Dict: '''simple docstring''' __snake_case : str = 'cpu' __snake_case : List[str] = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**__a ) __snake_case : Optional[Any] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : List[str] = output.image_embeds __snake_case : str = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] __snake_case : Union[str, Any] = image[0, -10:] __snake_case : Any = image_from_tuple[0, -10:] assert image.shape == (1, 32) __snake_case : List[Any] = np.array( [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def A_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = torch_device == 'cpu' __snake_case : Dict = True __snake_case : Union[str, Any] = False self._test_inference_batch_single_identical( test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , ) @skip_mps def A_ ( self : str ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = torch_device == 'cpu' __snake_case : Optional[Any] = False self._test_attention_slicing_forward_pass( test_max_difference=__a , test_mean_pixel_difference=__a , )
0
1
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) A__ : Dict = logging.getLogger() def a_ ( ) -> Tuple: __snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument('-f' ) __snake_case : Any = parser.parse_args() return args.f def a_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]: __snake_case : Tuple = {} __snake_case : Union[str, Any] = os.path.join(_UpperCAmelCase ,'all_results.json' ) if os.path.exists(_UpperCAmelCase ): with open(_UpperCAmelCase ,'r' ) as f: __snake_case : List[str] = json.load(_UpperCAmelCase ) else: raise ValueError(f'''can\'t find {path}''' ) return results def a_ ( ) -> Union[str, Any]: __snake_case : Union[str, Any] = torch.cuda.is_available() and torch_device == 'cuda' return is_using_cuda and is_apex_available() A__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @classmethod def A_ ( cls : Any ) -> List[str]: '''simple docstring''' # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __snake_case : Optional[int] = tempfile.mkdtemp() __snake_case : Dict = os.path.join(cls.tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) __snake_case : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def A_ ( cls : List[str] ) -> List[str]: '''simple docstring''' shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : List[Any] = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'glue_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertLess(result['perplexity'] , 100 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'clm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : int = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertLess(result['perplexity'] , 42 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'mlm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __snake_case : Any = 7 if get_gpu_count() > 1 else 2 __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertLess(result['train_loss'] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'ner_no_trainer' ) ) ) @unittest.skip(reason='Fix me @muellerzr' ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : Tuple = f''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['eval_f1'] , 28 ) self.assertGreaterEqual(result['eval_exact'] , 28 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'qa_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : str = self.get_auto_remove_tmp_dir() __snake_case : Any = f''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__a , 'swag_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : int = get_results(__a ) self.assertGreaterEqual(result['eval_rouge1'] , 10 ) self.assertGreaterEqual(result['eval_rouge2'] , 2 ) self.assertGreaterEqual(result['eval_rougeL'] , 7 ) self.assertGreaterEqual(result['eval_rougeLsum'] , 7 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_bleu'] , 30 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) ) @slow def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) __snake_case : List[str] = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Dict = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : Optional[int] = get_results(__a ) # The base model scores a 25% self.assertGreaterEqual(result['eval_accuracy'] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
0
'''simple docstring''' from math import factorial A__ : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)} def a_ ( _UpperCAmelCase : int ) -> int: if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): raise TypeError('Parameter number must be int' ) if number < 0: raise ValueError('Parameter number must be greater than or equal to 0' ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(_UpperCAmelCase ) ) def a_ ( _UpperCAmelCase : int = 60 ,_UpperCAmelCase : int = 1_00_00_00 ) -> int: if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): raise TypeError('Parameters chain_length and number_limit must be int' ) if chain_length <= 0 or number_limit <= 0: raise ValueError( 'Parameters chain_length and number_limit must be greater than 0' ) # the counter for the chains with the exact desired length __snake_case : List[str] = 0 # the cached sizes of the previous chains __snake_case : dict[int, int] = {} for start_chain_element in range(1 ,_UpperCAmelCase ): # The temporary set will contain the elements of the chain __snake_case : Optional[int] = set() __snake_case : List[Any] = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. __snake_case : str = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(_UpperCAmelCase ) chain_set_length += 1 __snake_case : Tuple = digit_factorial_sum(_UpperCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] __snake_case : Optional[Any] = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F"""{solution()}""")
0
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) A__ : Any = { '''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''], '''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''], '''processing_wav2vec2''': ['''Wav2Vec2Processor'''], '''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = [ '''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Wav2Vec2ForAudioFrameClassification''', '''Wav2Vec2ForCTC''', '''Wav2Vec2ForMaskedLM''', '''Wav2Vec2ForPreTraining''', '''Wav2Vec2ForSequenceClassification''', '''Wav2Vec2ForXVector''', '''Wav2Vec2Model''', '''Wav2Vec2PreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = [ '''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFWav2Vec2ForCTC''', '''TFWav2Vec2Model''', '''TFWav2Vec2PreTrainedModel''', '''TFWav2Vec2ForSequenceClassification''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ '''FlaxWav2Vec2ForCTC''', '''FlaxWav2Vec2ForPreTraining''', '''FlaxWav2Vec2Model''', '''FlaxWav2Vec2PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int = 1_00 ) -> int: __snake_case : Any = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Union[str, Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
0
1
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets A__ : List[str] = '''\ @article{hendrycksmath2021, title={Measuring Mathematical Problem Solving With the MATH Dataset}, author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt}, journal={arXiv preprint arXiv:2103.03874}, year={2021} } ''' A__ : Optional[int] = '''\ This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset. It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy. ''' A__ : Dict = R''' Calculates accuracy after canonicalizing inputs. Args: predictions: list of predictions to score. Each prediction is a string that contains natural language and LaTex. references: list of reference for each prediction. Each reference is a string that contains natural language and LaTex. Returns: accuracy: accuracy after canonicalizing inputs (e.g., converting "1/2" to "\\frac{1}{2}") Examples: >>> metric = datasets.load_metric("competition_math") >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"]) >>> print(results) {\'accuracy\': 1.0} ''' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ ( datasets.Metric ): def A_ ( self : str ) -> Tuple: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' ), 'references': datasets.Value('string' ), } ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , ) def A_ ( self : str , __a : List[str] , __a : Optional[Any] ) -> int: '''simple docstring''' __snake_case : Optional[int] = 0.0 for i, j in zip(__a , __a ): n_correct += 1.0 if math_equivalence.is_equiv(__a , __a ) else 0.0 __snake_case : Optional[Any] = n_correct / len(__a ) return { "accuracy": accuracy, }
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A__ : int = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Tuple = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys A__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
1
'''simple docstring''' from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def a_ ( _UpperCAmelCase : Namespace ) -> Union[str, Any]: return ConvertCommand( args.model_type ,args.tf_checkpoint ,args.pytorch_dump_output ,args.config ,args.finetuning_task_name ) A__ : Optional[int] = ''' transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. ''' class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @staticmethod def A_ ( __a : ArgumentParser ) -> str: '''simple docstring''' __snake_case : List[str] = parser.add_parser( 'convert' , help='CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.' , ) train_parser.add_argument('--model_type' , type=__a , required=__a , help='Model\'s type.' ) train_parser.add_argument( '--tf_checkpoint' , type=__a , required=__a , help='TensorFlow checkpoint path or folder.' ) train_parser.add_argument( '--pytorch_dump_output' , type=__a , required=__a , help='Path to the PyTorch saved model output.' ) train_parser.add_argument('--config' , type=__a , default='' , help='Configuration file path or folder.' ) train_parser.add_argument( '--finetuning_task_name' , type=__a , default=__a , help='Optional fine-tuning task name if the TF model was a finetuned model.' , ) train_parser.set_defaults(func=__a ) def __init__( self : Tuple , __a : str , __a : str , __a : str , __a : str , __a : str , *__a : int , ) -> List[str]: '''simple docstring''' __snake_case : Dict = logging.get_logger('transformers-cli/converting' ) self._logger.info(f'''Loading model {model_type}''' ) __snake_case : Dict = model_type __snake_case : Optional[Any] = tf_checkpoint __snake_case : str = pytorch_dump_output __snake_case : Dict = config __snake_case : str = finetuning_task_name def A_ ( self : List[str] ) -> List[Any]: '''simple docstring''' if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__a ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__a ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__a ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(__a ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__a ) if "ckpt" in self._tf_checkpoint.lower(): __snake_case : Optional[int] = self._tf_checkpoint __snake_case : List[str] = '' else: __snake_case : int = self._tf_checkpoint __snake_case : Tuple = '' convert_transfo_xl_checkpoint_to_pytorch( __a , self._config , self._pytorch_dump_output , __a ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__a ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__a ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]' )
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ShapEPipeline A__ = ['''prompt'''] A__ = ['''prompt'''] A__ = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Optional[Any] ) -> str: '''simple docstring''' return 32 @property def A_ ( self : str ) -> Optional[int]: '''simple docstring''' return 32 @property def A_ ( self : Tuple ) -> List[Any]: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : Tuple ) -> Dict: '''simple docstring''' return 8 @property def A_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' __snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__a ) @property def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Dict = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __snake_case : Optional[Any] = PriorTransformer(**__a ) return model @property def A_ ( self : Dict ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Tuple = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __snake_case : Optional[int] = ShapERenderer(**__a ) return model def A_ ( self : Tuple ) -> Tuple: '''simple docstring''' __snake_case : Tuple = self.dummy_prior __snake_case : Union[str, Any] = self.dummy_text_encoder __snake_case : List[str] = self.dummy_tokenizer __snake_case : Optional[Any] = self.dummy_renderer __snake_case : List[Any] = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , ) __snake_case : int = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def A_ ( self : Union[str, Any] , __a : Dict , __a : int=0 ) -> Optional[Any]: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : List[str] = torch.manual_seed(__a ) else: __snake_case : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : Optional[int] = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def A_ ( self : List[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = 'cpu' __snake_case : Dict = self.get_dummy_components() __snake_case : int = self.pipeline_class(**__a ) __snake_case : str = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : Dict = output.images[0] __snake_case : int = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __snake_case : str = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A_ ( self : Any ) -> List[str]: '''simple docstring''' # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A_ ( self : int ) -> Tuple: '''simple docstring''' __snake_case : int = torch_device == 'cpu' __snake_case : str = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__a , relax_max_difference=__a , ) def A_ ( self : List[str] ) -> Dict: '''simple docstring''' __snake_case : str = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**__a ) __snake_case : Dict = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : int = 1 __snake_case : Tuple = 2 __snake_case : Tuple = self.get_dummy_inputs(__a ) for key in inputs.keys(): if key in self.batch_params: __snake_case : Union[str, Any] = batch_size * [inputs[key]] __snake_case : str = pipe(**__a , num_images_per_prompt=__a )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def A_ ( self : str ) -> Dict: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) __snake_case : Union[str, Any] = ShapEPipeline.from_pretrained('openai/shap-e' ) __snake_case : Any = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[int] = torch.Generator(device=__a ).manual_seed(0 ) __snake_case : Union[str, Any] = pipe( 'a shark' , generator=__a , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__a , __a )
0
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A__ : List[str] = logging.get_logger(__name__) A__ : Dict = { '''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''', } class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A__ = '''convnextv2''' def __init__( self : Union[str, Any] , __a : Tuple=3 , __a : str=4 , __a : Any=4 , __a : Optional[Any]=None , __a : int=None , __a : int="gelu" , __a : Optional[int]=0.0_2 , __a : List[Any]=1e-12 , __a : Tuple=0.0 , __a : str=224 , __a : Any=None , __a : List[str]=None , **__a : List[str] , ) -> str: '''simple docstring''' super().__init__(**__a ) __snake_case : Dict = num_channels __snake_case : List[Any] = patch_size __snake_case : Dict = num_stages __snake_case : Optional[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes __snake_case : Dict = [3, 3, 9, 3] if depths is None else depths __snake_case : Union[str, Any] = hidden_act __snake_case : List[Any] = initializer_range __snake_case : Optional[Any] = layer_norm_eps __snake_case : Optional[int] = drop_path_rate __snake_case : str = image_size __snake_case : str = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )] __snake_case , __snake_case : Optional[int] = get_aligned_output_features_output_indices( out_features=__a , out_indices=__a , stage_names=self.stage_names )
0
'''simple docstring''' from __future__ import annotations import time import numpy as np A__ : str = [8, 5, 9, 7] A__ : List[str] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A__ : Dict = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class snake_case__ : def __init__( self : Union[str, Any] , __a : list[int] , __a : list[list[int]] , __a : list[list[int]] , ) -> None: '''simple docstring''' __snake_case : int = claim_vector __snake_case : Optional[int] = allocated_resources_table __snake_case : List[str] = maximum_claim_table def A_ ( self : str ) -> list[int]: '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def A_ ( self : int ) -> list[int]: '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def A_ ( self : int ) -> list[list[int]]: '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def A_ ( self : str ) -> dict[int, list[int]]: '''simple docstring''' return {self.__need().index(__a ): i for i in self.__need()} def A_ ( self : Union[str, Any] , **__a : int ) -> None: '''simple docstring''' __snake_case : str = self.__need() __snake_case : List[Any] = self.__allocated_resources_table __snake_case : Optional[int] = self.__available_resources() __snake_case : Union[str, Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: __snake_case : Tuple = False for each_need in need_list: __snake_case : Any = True for index, need in enumerate(__a ): if need > available_resources[index]: __snake_case : List[str] = False break if execution: __snake_case : Union[str, Any] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __snake_case : str = original_need_index print(f'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(__a ) # update available/freed resources stack __snake_case : Union[str, Any] = np.array(__a ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(__a ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( f'''P{self.__allocated_resources_table.index(__a ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( f'''P{self.__maximum_claim_table.index(__a ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(__a ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(__a ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
0
1
'''simple docstring''' import inspect import unittest from transformers import MobileViTConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : Any ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , 'hidden_sizes' ) ) self.parent.assertTrue(hasattr(__a , 'neck_hidden_sizes' ) ) self.parent.assertTrue(hasattr(__a , 'num_attention_heads' ) ) class snake_case__ : def __init__( self : Any , __a : List[str] , __a : Optional[int]=13 , __a : int=32 , __a : Tuple=2 , __a : Tuple=3 , __a : Any=640 , __a : List[str]=4 , __a : Any="silu" , __a : int=3 , __a : List[str]=32 , __a : Optional[int]=0.1 , __a : Any=0.1 , __a : List[str]=0.1 , __a : List[Any]=0.0_2 , __a : List[str]=True , __a : str=True , __a : Optional[int]=10 , __a : Optional[Any]=None , ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = parent __snake_case : List[Any] = batch_size __snake_case : int = image_size __snake_case : List[str] = patch_size __snake_case : int = num_channels __snake_case : Optional[Any] = last_hidden_size __snake_case : int = num_attention_heads __snake_case : Optional[int] = hidden_act __snake_case : Tuple = conv_kernel_size __snake_case : List[Any] = output_stride __snake_case : Optional[Any] = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Any = classifier_dropout_prob __snake_case : Any = use_labels __snake_case : Optional[int] = is_training __snake_case : Optional[int] = num_labels __snake_case : Any = initializer_range __snake_case : Union[str, Any] = scope def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : List[Any] = None __snake_case : int = None if self.use_labels: __snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __snake_case : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def A_ ( self : List[str] ) -> int: '''simple docstring''' return MobileViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def A_ ( self : str , __a : Union[str, Any] , __a : Optional[int] , __a : Optional[Any] , __a : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[Any] = MobileViTModel(config=__a ) model.to(__a ) model.eval() __snake_case : Union[str, Any] = model(__a ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def A_ ( self : Optional[Any] , __a : str , __a : Dict , __a : Optional[Any] , __a : Any ) -> int: '''simple docstring''' __snake_case : Union[str, Any] = self.num_labels __snake_case : Tuple = MobileViTForImageClassification(__a ) model.to(__a ) model.eval() __snake_case : Union[str, Any] = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self : Dict , __a : List[Any] , __a : List[str] , __a : str , __a : Optional[int] ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : Dict = MobileViTForSemanticSegmentation(__a ) model.to(__a ) model.eval() __snake_case : Any = model(__a ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) __snake_case : Dict = model(__a , labels=__a ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def A_ ( self : Any ) -> Any: '''simple docstring''' __snake_case : Dict = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case , __snake_case : Optional[int] = config_and_inputs __snake_case : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ( (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation) if is_torch_available() else () ) A__ = ( { '''feature-extraction''': MobileViTModel, '''image-classification''': MobileViTForImageClassification, '''image-segmentation''': MobileViTForSemanticSegmentation, } if is_torch_available() else {} ) A__ = False A__ = False A__ = False A__ = False def A_ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = MobileViTModelTester(self ) __snake_case : str = MobileViTConfigTester(self , config_class=__a , has_text_modality=__a ) def A_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViT does not use inputs_embeds' ) def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' pass @unittest.skip(reason='MobileViT does not support input and output embeddings' ) def A_ ( self : Tuple ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason='MobileViT does not output attentions' ) def A_ ( self : int ) -> Union[str, Any]: '''simple docstring''' pass def A_ ( self : int ) -> Any: '''simple docstring''' __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Any = model_class(__a ) __snake_case : Any = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : List[Any] = [*signature.parameters.keys()] __snake_case : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def A_ ( self : Optional[int] ) -> Any: '''simple docstring''' pass def A_ ( self : Any ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def A_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' def check_hidden_states_output(__a : Any , __a : Tuple , __a : Optional[Any] ): __snake_case : Union[str, Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): __snake_case : int = model(**self._prepare_for_class(__a , __a ) ) __snake_case : int = outputs.hidden_states __snake_case : Tuple = 5 self.assertEqual(len(__a ) , __a ) # MobileViT's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. __snake_case : Optional[Any] = 2 for i in range(len(__a ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) __snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Optional[Any] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : Tuple = True check_hidden_states_output(__a , __a , __a ) def A_ ( self : Optional[int] ) -> str: '''simple docstring''' __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) def A_ ( self : int ) -> str: '''simple docstring''' __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__a ) @slow def A_ ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Optional[int] = MobileViTModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a_ ( ) -> List[Any]: __snake_case : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class snake_case__ ( unittest.TestCase ): @cached_property def A_ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small' ) if is_vision_available() else None @slow def A_ ( self : int ) -> Tuple: '''simple docstring''' __snake_case : Dict = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small' ).to(__a ) __snake_case : List[Any] = self.default_image_processor __snake_case : Optional[Any] = prepare_img() __snake_case : int = image_processor(images=__a , return_tensors='pt' ).to(__a ) # forward pass with torch.no_grad(): __snake_case : Optional[int] = model(**__a ) # verify the logits __snake_case : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) __snake_case : int = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @slow def A_ ( self : Any ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : str = model.to(__a ) __snake_case : Dict = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : Optional[int] = prepare_img() __snake_case : Any = image_processor(images=__a , return_tensors='pt' ).to(__a ) # forward pass with torch.no_grad(): __snake_case : int = model(**__a ) __snake_case : List[str] = outputs.logits # verify the logits __snake_case : Tuple = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , __a ) __snake_case : Union[str, Any] = torch.tensor( [ [[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]], [[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]], [[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]], ] , device=__a , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __a , atol=1e-4 ) ) @slow def A_ ( self : List[str] ) -> Any: '''simple docstring''' __snake_case : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : int = model.to(__a ) __snake_case : List[str] = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small' ) __snake_case : str = prepare_img() __snake_case : Optional[int] = image_processor(images=__a , return_tensors='pt' ).to(__a ) # forward pass with torch.no_grad(): __snake_case : Tuple = model(**__a ) __snake_case : Tuple = outputs.logits.detach().cpu() __snake_case : Any = image_processor.post_process_semantic_segmentation(outputs=__a , target_sizes=[(50, 60)] ) __snake_case : Tuple = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , __a ) __snake_case : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=__a ) __snake_case : Dict = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , __a )
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer A__ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A__ : List[Any] = { '''vocab_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt''' ), '''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''', '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json''' ), '''google/electra-base-generator''': ( '''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json''' ), '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json''' ), }, } A__ : List[Any] = { '''google/electra-small-generator''': 5_1_2, '''google/electra-base-generator''': 5_1_2, '''google/electra-large-generator''': 5_1_2, '''google/electra-small-discriminator''': 5_1_2, '''google/electra-base-discriminator''': 5_1_2, '''google/electra-large-discriminator''': 5_1_2, } A__ : Optional[Any] = { '''google/electra-small-generator''': {'''do_lower_case''': True}, '''google/electra-base-generator''': {'''do_lower_case''': True}, '''google/electra-large-generator''': {'''do_lower_case''': True}, '''google/electra-small-discriminator''': {'''do_lower_case''': True}, '''google/electra-base-discriminator''': {'''do_lower_case''': True}, '''google/electra-large-discriminator''': {'''do_lower_case''': True}, } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_INIT_CONFIGURATION A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ElectraTokenizer def __init__( self : int , __a : List[Any]=None , __a : int=None , __a : List[str]=True , __a : Any="[UNK]" , __a : Any="[SEP]" , __a : Union[str, Any]="[PAD]" , __a : Dict="[CLS]" , __a : List[Any]="[MASK]" , __a : str=True , __a : Optional[int]=None , **__a : Optional[int] , ) -> str: '''simple docstring''' super().__init__( __a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , ) __snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , __a ) != do_lower_case or normalizer_state.get('strip_accents' , __a ) != strip_accents or normalizer_state.get('handle_chinese_chars' , __a ) != tokenize_chinese_chars ): __snake_case : List[Any] = getattr(__a , normalizer_state.pop('type' ) ) __snake_case : str = do_lower_case __snake_case : Optional[int] = strip_accents __snake_case : Any = tokenize_chinese_chars __snake_case : Union[str, Any] = normalizer_class(**__a ) __snake_case : Any = do_lower_case def A_ ( self : Any , __a : List[str] , __a : Optional[Any]=None ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A_ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' __snake_case : int = [self.sep_token_id] __snake_case : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self : Optional[int] , __a : str , __a : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' __snake_case : Tuple = self._tokenizer.model.save(__a , name=__a ) return tuple(__a )
0
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Union[str, Any] = logging.get_logger(__name__) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''timm_backbone''' def __init__( self : Optional[Any] , __a : List[str]=None , __a : int=3 , __a : Tuple=True , __a : Tuple=True , __a : Tuple=None , **__a : Tuple , ) -> Optional[Any]: '''simple docstring''' super().__init__(**__a ) __snake_case : Any = backbone __snake_case : Union[str, Any] = num_channels __snake_case : Optional[int] = features_only __snake_case : str = use_pretrained_backbone __snake_case : Any = True __snake_case : Union[str, Any] = out_indices if out_indices is not None else (-1,)
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> bool: __snake_case : Union[str, Any] = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(2_7)) print(perfect_cube(4))
0
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A__ : List[Any] = { '''configuration_electra''': ['''ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ElectraConfig''', '''ElectraOnnxConfig'''], '''tokenization_electra''': ['''ElectraTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Tuple = ['''ElectraTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = [ '''ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''ElectraForCausalLM''', '''ElectraForMaskedLM''', '''ElectraForMultipleChoice''', '''ElectraForPreTraining''', '''ElectraForQuestionAnswering''', '''ElectraForSequenceClassification''', '''ElectraForTokenClassification''', '''ElectraModel''', '''ElectraPreTrainedModel''', '''load_tf_weights_in_electra''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[Any] = [ '''TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFElectraForMaskedLM''', '''TFElectraForMultipleChoice''', '''TFElectraForPreTraining''', '''TFElectraForQuestionAnswering''', '''TFElectraForSequenceClassification''', '''TFElectraForTokenClassification''', '''TFElectraModel''', '''TFElectraPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[Any] = [ '''FlaxElectraForCausalLM''', '''FlaxElectraForMaskedLM''', '''FlaxElectraForMultipleChoice''', '''FlaxElectraForPreTraining''', '''FlaxElectraForQuestionAnswering''', '''FlaxElectraForSequenceClassification''', '''FlaxElectraForTokenClassification''', '''FlaxElectraModel''', '''FlaxElectraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys A__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss A__ : Tuple = pytest.mark.integration @require_faiss class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Dict = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(__a ) for x in np.arange(30 ).tolist()]} ) return dset def A_ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() __snake_case : Dict = dset.map( lambda __a , __a : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__a , keep_in_memory=__a ) __snake_case : List[Any] = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def A_ ( self : Tuple ) -> Any: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def A_ ( self : List[Any] ) -> Dict: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __snake_case , __snake_case : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def A_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(__a , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' from elasticsearch import Elasticsearch __snake_case : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __snake_case : Any = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __snake_case : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=__a ) __snake_case , __snake_case : str = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : str ) -> int: '''simple docstring''' import faiss __snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __snake_case : Dict = np.zeros(5 , dtype=np.floataa ) __snake_case : List[str] = 1 __snake_case , __snake_case : List[Any] = index.search(__a ) self.assertRaises(__a , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __snake_case : List[str] = np.eye(5 , dtype=np.floataa )[::-1] __snake_case , __snake_case : Dict = index.search_batch(__a ) self.assertRaises(__a , index.search_batch , queries[0] ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : List[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __a ) def A_ ( self : int ) -> int: '''simple docstring''' import faiss __snake_case : int = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __snake_case : List[str] = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__a ): __snake_case : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def A_ ( self : str ) -> Dict: '''simple docstring''' import faiss __snake_case : Tuple = faiss.IndexFlat(5 ) __snake_case : List[Any] = FaissIndex(custom_index=__a ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' import faiss __snake_case : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file: index.save(tmp_file.name ) __snake_case : List[Any] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __snake_case : List[Any] = np.zeros(5 , dtype=np.floataa ) __snake_case : Any = 1 __snake_case , __snake_case : int = index.search(__a ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def a_ ( _UpperCAmelCase : str ) -> Optional[int]: import faiss __snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 ,dtype=np.floataa ) ) __snake_case : Dict = 'index.faiss' __snake_case : Any = f'''mock://{index_name}''' index.save(_UpperCAmelCase ,storage_options=mockfs.storage_options ) __snake_case : Any = FaissIndex.load(_UpperCAmelCase ,storage_options=mockfs.storage_options ) __snake_case : Any = np.zeros(5 ,dtype=np.floataa ) __snake_case : Any = 1 __snake_case , __snake_case : Tuple = index.search(_UpperCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __snake_case : int = Elasticsearch() __snake_case : Dict = {'acknowledged': True} __snake_case : List[Any] = ElasticSearchIndex(es_client=__a ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __snake_case : Optional[Any] = 'foo' __snake_case : int = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case : List[Any] = index.search(__a ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __snake_case : Dict = 'foo' __snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case : Optional[Any] = index.search(__a , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __snake_case : List[Any] = ['foo', 'bar', 'foobar'] __snake_case : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case : Any = index.search_batch(__a ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([1, 1, 1] , __a ) # batched queries with timeout __snake_case : Tuple = ['foo', 'bar', 'foobar'] __snake_case : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case : int = index.search_batch(__a , request_timeout=30 ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : Dict = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([1, 1, 1] , __a )
0
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class snake_case__ : A__ = PegasusConfig A__ = {} A__ = '''gelu''' def __init__( self : Tuple , __a : Any , __a : str=13 , __a : Union[str, Any]=7 , __a : Optional[Any]=True , __a : Optional[int]=False , __a : Tuple=99 , __a : Optional[Any]=32 , __a : Any=2 , __a : Tuple=4 , __a : List[str]=37 , __a : Any=0.1 , __a : int=0.1 , __a : List[str]=40 , __a : List[str]=2 , __a : Optional[int]=1 , __a : Any=0 , ) -> List[Any]: '''simple docstring''' __snake_case : Union[str, Any] = parent __snake_case : List[Any] = batch_size __snake_case : Tuple = seq_length __snake_case : Dict = is_training __snake_case : str = use_labels __snake_case : str = vocab_size __snake_case : List[Any] = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Optional[Any] = intermediate_size __snake_case : List[Any] = hidden_dropout_prob __snake_case : Tuple = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : List[str] = eos_token_id __snake_case : Any = pad_token_id __snake_case : Tuple = bos_token_id def A_ ( self : List[Any] ) -> List[Any]: '''simple docstring''' __snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __snake_case : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __snake_case : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) __snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __snake_case : Dict = prepare_pegasus_inputs_dict(__a , __a , __a ) return config, inputs_dict def A_ ( self : Union[str, Any] , __a : Optional[Any] , __a : List[Any] ) -> Optional[int]: '''simple docstring''' __snake_case : List[str] = TFPegasusModel(config=__a ).get_decoder() __snake_case : Union[str, Any] = inputs_dict['input_ids'] __snake_case : Optional[Any] = input_ids[:1, :] __snake_case : int = inputs_dict['attention_mask'][:1, :] __snake_case : Optional[Any] = inputs_dict['head_mask'] __snake_case : str = 1 # first forward pass __snake_case : Tuple = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) __snake_case , __snake_case : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __snake_case : str = ids_tensor((self.batch_size, 3) , config.vocab_size ) __snake_case : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __snake_case : Any = tf.concat([input_ids, next_tokens] , axis=-1 ) __snake_case : List[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __snake_case : Union[str, Any] = model(__a , attention_mask=__a )[0] __snake_case : Tuple = model(__a , attention_mask=__a , past_key_values=__a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __snake_case : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __snake_case : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] __snake_case : Tuple = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__a , __a , rtol=1e-3 ) def a_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : Dict=None ,_UpperCAmelCase : List[Any]=None ,_UpperCAmelCase : int=None ,_UpperCAmelCase : str=None ,) -> Optional[Any]: if attention_mask is None: __snake_case : List[Any] = tf.cast(tf.math.not_equal(_UpperCAmelCase ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: __snake_case : str = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: __snake_case : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __snake_case : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __snake_case : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () A__ = (TFPegasusForConditionalGeneration,) if is_tf_available() else () A__ = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) A__ = True A__ = False A__ = False def A_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' __snake_case : List[Any] = TFPegasusModelTester(self ) __snake_case : Dict = ConfigTester(self , config_class=__a ) def A_ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self : int ) -> List[Any]: '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a ) @require_sentencepiece @require_tokenizers @require_tf class snake_case__ ( unittest.TestCase ): A__ = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''', ] A__ = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers A__ = '''google/pegasus-xsum''' @cached_property def A_ ( self : int ) -> str: '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name ) @cached_property def A_ ( self : List[Any] ) -> Any: '''simple docstring''' __snake_case : str = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model def A_ ( self : Tuple , **__a : Optional[int] ) -> Any: '''simple docstring''' __snake_case : Dict = self.translate_src_text(**__a ) assert self.expected_text == generated_words def A_ ( self : Optional[int] , **__a : Dict ) -> List[str]: '''simple docstring''' __snake_case : Dict = self.tokenizer(self.src_text , **__a , padding=__a , return_tensors='tf' ) __snake_case : str = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , ) __snake_case : int = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a ) return generated_words @slow def A_ ( self : Dict ) -> str: '''simple docstring''' self._assert_generated_batch_equal_expected()
0
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging A__ : List[Any] = logging.get_logger(__name__) A__ : Tuple = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''t5''' A__ = ['''past_key_values'''] A__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , __a : Dict=32128 , __a : Dict=512 , __a : Union[str, Any]=64 , __a : str=2048 , __a : Union[str, Any]=6 , __a : Any=None , __a : Any=8 , __a : List[Any]=32 , __a : Any=128 , __a : Tuple=0.1 , __a : str=1e-6 , __a : Dict=1.0 , __a : Tuple="relu" , __a : Dict=True , __a : Union[str, Any]=True , __a : Any=0 , __a : Dict=1 , **__a : Union[str, Any] , ) -> Union[str, Any]: '''simple docstring''' __snake_case : int = vocab_size __snake_case : str = d_model __snake_case : str = d_kv __snake_case : List[Any] = d_ff __snake_case : List[str] = num_layers __snake_case : Tuple = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __snake_case : Union[str, Any] = num_heads __snake_case : Tuple = relative_attention_num_buckets __snake_case : Optional[int] = relative_attention_max_distance __snake_case : Optional[Any] = dropout_rate __snake_case : str = layer_norm_epsilon __snake_case : List[str] = initializer_factor __snake_case : int = feed_forward_proj __snake_case : Optional[Any] = use_cache __snake_case : Optional[Any] = self.feed_forward_proj.split('-' ) __snake_case : Dict = act_info[-1] __snake_case : List[str] = act_info[0] == 'gated' if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __snake_case : Dict = 'gelu_new' super().__init__( pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @property def A_ ( self : str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __snake_case : Union[str, Any] = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: __snake_case : Tuple = 'past_encoder_sequence + sequence' __snake_case : Dict = {0: 'batch'} __snake_case : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __snake_case : Tuple = {0: 'batch', 1: 'decoder_sequence'} __snake_case : int = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__a , direction='inputs' ) return common_inputs @property def A_ ( self : List[Any] ) -> int: '''simple docstring''' return 13
0
1
'''simple docstring''' from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def a_ ( _UpperCAmelCase : Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]: __snake_case : Any = [] __snake_case : List[str] = [] __snake_case : Optional[int] = [] for rt in rc.restypes: __snake_case : int = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] ) __snake_case : Optional[int] = {name: i for i, name in enumerate(_UpperCAmelCase )} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] ) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] ) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14 ) restype_atomaa_to_atomaa_list.append([0] * 37 ) restype_atomaa_mask_list.append([0.0] * 14 ) __snake_case : Optional[Any] = torch.tensor( _UpperCAmelCase ,dtype=torch.intaa ,device=protein['aatype'].device ,) __snake_case : Union[str, Any] = torch.tensor( _UpperCAmelCase ,dtype=torch.intaa ,device=protein['aatype'].device ,) __snake_case : Optional[Any] = torch.tensor( _UpperCAmelCase ,dtype=torch.floataa ,device=protein['aatype'].device ,) __snake_case : Dict = protein['aatype'].to(torch.long ) # create the mapping for (residx, atom14) --> atom37, i.e. an array # with shape (num_res, 14) containing the atom37 indices for this protein __snake_case : Any = restype_atomaa_to_atomaa[protein_aatype] __snake_case : List[str] = restype_atomaa_mask[protein_aatype] __snake_case : Tuple = residx_atomaa_mask __snake_case : Union[str, Any] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back __snake_case : Optional[Any] = restype_atomaa_to_atomaa[protein_aatype] __snake_case : Any = residx_atomaa_to_atomaa.long() # create the corresponding mask __snake_case : Any = torch.zeros([21, 37] ,dtype=torch.floataa ,device=protein['aatype'].device ) for restype, restype_letter in enumerate(rc.restypes ): __snake_case : Optional[Any] = rc.restype_atoa[restype_letter] __snake_case : Optional[int] = rc.residue_atoms[restype_name] for atom_name in atom_names: __snake_case : Tuple = rc.atom_order[atom_name] __snake_case : List[str] = 1 __snake_case : str = restype_atomaa_mask[protein_aatype] __snake_case : List[str] = residx_atomaa_mask return protein def a_ ( _UpperCAmelCase : Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]: __snake_case : Union[str, Any] = tree_map(lambda _UpperCAmelCase : torch.tensor(_UpperCAmelCase ,device=batch['aatype'].device ) ,_UpperCAmelCase ,np.ndarray ) __snake_case : List[Any] = tensor_tree_map(lambda _UpperCAmelCase : np.array(_UpperCAmelCase ) ,make_atomaa_masks(_UpperCAmelCase ) ) return out
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : Optional[int] = {} class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''llama''' A__ = ['''past_key_values'''] def __init__( self : Any , __a : List[str]=32000 , __a : Union[str, Any]=4096 , __a : Optional[Any]=11008 , __a : Any=32 , __a : str=32 , __a : Optional[int]=None , __a : Dict="silu" , __a : Dict=2048 , __a : List[str]=0.0_2 , __a : Union[str, Any]=1e-6 , __a : Dict=True , __a : List[str]=0 , __a : Tuple=1 , __a : Tuple=2 , __a : Optional[Any]=1 , __a : Any=False , __a : Tuple=None , **__a : List[Any] , ) -> Optional[int]: '''simple docstring''' __snake_case : str = vocab_size __snake_case : List[str] = max_position_embeddings __snake_case : List[Any] = hidden_size __snake_case : Union[str, Any] = intermediate_size __snake_case : Optional[int] = num_hidden_layers __snake_case : List[Any] = num_attention_heads # for backward compatibility if num_key_value_heads is None: __snake_case : Optional[int] = num_attention_heads __snake_case : Optional[Any] = num_key_value_heads __snake_case : int = hidden_act __snake_case : Any = initializer_range __snake_case : Any = rms_norm_eps __snake_case : Union[str, Any] = pretraining_tp __snake_case : Optional[int] = use_cache __snake_case : Any = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , ) def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' f'''got {self.rope_scaling}''' ) __snake_case : Optional[Any] = self.rope_scaling.get('type' , __a ) __snake_case : Tuple = self.rope_scaling.get('factor' , __a ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
0
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : List[Any] = logging.get_logger(__name__) A__ : int = { '''caidas/swin2sr-classicalsr-x2-64''': ( '''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json''' ), } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''swin2sr''' A__ = { '''hidden_size''': '''embed_dim''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Optional[Any] , __a : List[Any]=64 , __a : Dict=1 , __a : Dict=3 , __a : List[str]=180 , __a : Union[str, Any]=[6, 6, 6, 6, 6, 6] , __a : Any=[6, 6, 6, 6, 6, 6] , __a : int=8 , __a : int=2.0 , __a : List[str]=True , __a : str=0.0 , __a : Any=0.0 , __a : str=0.1 , __a : List[str]="gelu" , __a : str=False , __a : str=0.0_2 , __a : List[Any]=1e-5 , __a : Union[str, Any]=2 , __a : List[str]=1.0 , __a : Tuple="1conv" , __a : Dict="pixelshuffle" , **__a : Dict , ) -> List[Any]: '''simple docstring''' super().__init__(**__a ) __snake_case : Any = image_size __snake_case : Union[str, Any] = patch_size __snake_case : Tuple = num_channels __snake_case : int = embed_dim __snake_case : Dict = depths __snake_case : Tuple = len(__a ) __snake_case : Union[str, Any] = num_heads __snake_case : Optional[Any] = window_size __snake_case : List[str] = mlp_ratio __snake_case : int = qkv_bias __snake_case : str = hidden_dropout_prob __snake_case : Union[str, Any] = attention_probs_dropout_prob __snake_case : Optional[Any] = drop_path_rate __snake_case : str = hidden_act __snake_case : str = use_absolute_embeddings __snake_case : int = layer_norm_eps __snake_case : List[Any] = initializer_range __snake_case : int = upscale __snake_case : int = img_range __snake_case : Dict = resi_connection __snake_case : Dict = upsampler
0
'''simple docstring''' from __future__ import annotations A__ : str = '''Muhammad Umer Farooq''' A__ : int = '''MIT''' A__ : Optional[int] = '''1.0.0''' A__ : List[Any] = '''Muhammad Umer Farooq''' A__ : Optional[Any] = '''[email protected]''' A__ : Optional[Any] = '''Alpha''' import re from html.parser import HTMLParser from urllib import parse import requests class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Union[str, Any] , __a : str ) -> None: '''simple docstring''' super().__init__() __snake_case : list[str] = [] __snake_case : Dict = domain def A_ ( self : Dict , __a : str , __a : list[tuple[str, str | None]] ) -> None: '''simple docstring''' # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: __snake_case : Optional[Any] = parse.urljoin(self.domain , __a ) self.urls.append(__a ) def a_ ( _UpperCAmelCase : str ) -> str: return ".".join(get_sub_domain_name(_UpperCAmelCase ).split('.' )[-2:] ) def a_ ( _UpperCAmelCase : str ) -> str: return parse.urlparse(_UpperCAmelCase ).netloc def a_ ( _UpperCAmelCase : str = "https://github.com" ) -> list[str]: __snake_case : List[Any] = get_domain_name(_UpperCAmelCase ) # Initialize the parser __snake_case : Tuple = Parser(_UpperCAmelCase ) try: # Open URL __snake_case : Any = requests.get(_UpperCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through __snake_case : Dict = set() for link in parser.urls: # open URL. # read = requests.get(link) try: __snake_case : List[Any] = requests.get(_UpperCAmelCase ) # Get the valid email. __snake_case : Optional[Any] = re.findall('[a-zA-Z0-9]+@' + domain ,read.text ) # If not in list then append it. for email in emails: valid_emails.add(_UpperCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(_UpperCAmelCase ) if __name__ == "__main__": A__ : Tuple = emails_from_url('''https://github.com''') print(F"""{len(emails)} emails found:""") print('''\n'''.join(sorted(emails)))
0
1
'''simple docstring''' from math import asin, atan, cos, radians, sin, sqrt, tan A__ : Optional[Any] = 6_37_81_37.0 A__ : int = 6_35_67_52.31_42_45 A__ : Optional[Any] = 6_3_7_8_1_3_7 def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float: __snake_case : int = (AXIS_A - AXIS_B) / AXIS_A __snake_case : List[Any] = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) ) __snake_case : List[Any] = atan((1 - flattening) * tan(radians(_UpperCAmelCase ) ) ) __snake_case : int = radians(_UpperCAmelCase ) __snake_case : List[Any] = radians(_UpperCAmelCase ) # Equation __snake_case : int = sin((phi_a - phi_a) / 2 ) __snake_case : Tuple = sin((lambda_a - lambda_a) / 2 ) # Square both values sin_sq_phi *= sin_sq_phi sin_sq_lambda *= sin_sq_lambda __snake_case : Any = sqrt(sin_sq_phi + (cos(_UpperCAmelCase ) * cos(_UpperCAmelCase ) * sin_sq_lambda) ) return 2 * RADIUS * asin(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
0
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) A__ : Dict = logging.getLogger() def a_ ( ) -> Tuple: __snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument('-f' ) __snake_case : Any = parser.parse_args() return args.f def a_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]: __snake_case : Tuple = {} __snake_case : Union[str, Any] = os.path.join(_UpperCAmelCase ,'all_results.json' ) if os.path.exists(_UpperCAmelCase ): with open(_UpperCAmelCase ,'r' ) as f: __snake_case : List[str] = json.load(_UpperCAmelCase ) else: raise ValueError(f'''can\'t find {path}''' ) return results def a_ ( ) -> Union[str, Any]: __snake_case : Union[str, Any] = torch.cuda.is_available() and torch_device == 'cuda' return is_using_cuda and is_apex_available() A__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @classmethod def A_ ( cls : Any ) -> List[str]: '''simple docstring''' # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __snake_case : Optional[int] = tempfile.mkdtemp() __snake_case : Dict = os.path.join(cls.tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) __snake_case : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def A_ ( cls : List[str] ) -> List[str]: '''simple docstring''' shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : List[Any] = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'glue_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertLess(result['perplexity'] , 100 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'clm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : int = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertLess(result['perplexity'] , 42 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'mlm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __snake_case : Any = 7 if get_gpu_count() > 1 else 2 __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertLess(result['train_loss'] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'ner_no_trainer' ) ) ) @unittest.skip(reason='Fix me @muellerzr' ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : Tuple = f''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['eval_f1'] , 28 ) self.assertGreaterEqual(result['eval_exact'] , 28 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'qa_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : str = self.get_auto_remove_tmp_dir() __snake_case : Any = f''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__a , 'swag_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : int = get_results(__a ) self.assertGreaterEqual(result['eval_rouge1'] , 10 ) self.assertGreaterEqual(result['eval_rouge2'] , 2 ) self.assertGreaterEqual(result['eval_rougeL'] , 7 ) self.assertGreaterEqual(result['eval_rougeLsum'] , 7 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_bleu'] , 30 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) ) @slow def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) __snake_case : List[str] = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Dict = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : Optional[int] = get_results(__a ) # The base model scores a 25% self.assertGreaterEqual(result['eval_accuracy'] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
0
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class snake_case__ : A__ = BlenderbotSmallConfig A__ = {} A__ = '''gelu''' def __init__( self : List[str] , __a : Optional[Any] , __a : Union[str, Any]=13 , __a : Dict=7 , __a : Tuple=True , __a : Union[str, Any]=False , __a : Optional[Any]=99 , __a : List[str]=32 , __a : Tuple=2 , __a : Dict=4 , __a : Any=37 , __a : Any=0.1 , __a : Tuple=0.1 , __a : Any=20 , __a : Tuple=2 , __a : Any=1 , __a : Optional[Any]=0 , ) -> Any: '''simple docstring''' __snake_case : int = parent __snake_case : Optional[Any] = batch_size __snake_case : List[Any] = seq_length __snake_case : int = is_training __snake_case : Optional[Any] = use_labels __snake_case : Union[str, Any] = vocab_size __snake_case : Dict = hidden_size __snake_case : Tuple = num_hidden_layers __snake_case : Optional[Any] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : int = hidden_dropout_prob __snake_case : str = attention_probs_dropout_prob __snake_case : Tuple = max_position_embeddings __snake_case : int = eos_token_id __snake_case : Optional[int] = pad_token_id __snake_case : Optional[int] = bos_token_id def A_ ( self : Optional[Any] ) -> Dict: '''simple docstring''' __snake_case : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) __snake_case : Union[str, Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) __snake_case : int = tf.concat([input_ids, eos_tensor] , axis=1 ) __snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Union[str, Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __snake_case : int = prepare_blenderbot_small_inputs_dict(__a , __a , __a ) return config, inputs_dict def A_ ( self : Any , __a : int , __a : List[str] ) -> Optional[Any]: '''simple docstring''' __snake_case : str = TFBlenderbotSmallModel(config=__a ).get_decoder() __snake_case : Any = inputs_dict['input_ids'] __snake_case : int = input_ids[:1, :] __snake_case : str = inputs_dict['attention_mask'][:1, :] __snake_case : int = inputs_dict['head_mask'] __snake_case : Optional[Any] = 1 # first forward pass __snake_case : Any = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a ) __snake_case , __snake_case : Union[str, Any] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __snake_case : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) __snake_case : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and __snake_case : int = tf.concat([input_ids, next_tokens] , axis=-1 ) __snake_case : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) __snake_case : Tuple = model(__a , attention_mask=__a )[0] __snake_case : int = model(__a , attention_mask=__a , past_key_values=__a )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice __snake_case : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) __snake_case : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx] __snake_case : Dict = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(__a , __a , rtol=1e-3 ) def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Any=None ,_UpperCAmelCase : Tuple=None ,_UpperCAmelCase : Optional[int]=None ,_UpperCAmelCase : Tuple=None ,) -> str: if attention_mask is None: __snake_case : Tuple = tf.cast(tf.math.not_equal(_UpperCAmelCase ,config.pad_token_id ) ,tf.inta ) if decoder_attention_mask is None: __snake_case : Any = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ), ] ,axis=-1 ,) if head_mask is None: __snake_case : int = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: __snake_case : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: __snake_case : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) A__ = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () A__ = ( { '''conversational''': TFBlenderbotSmallForConditionalGeneration, '''feature-extraction''': TFBlenderbotSmallModel, '''summarization''': TFBlenderbotSmallForConditionalGeneration, '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration, '''translation''': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) A__ = True A__ = False A__ = False def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = TFBlenderbotSmallModelTester(self ) __snake_case : Tuple = ConfigTester(self , config_class=__a ) def A_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*__a ) @require_tokenizers @require_tf class snake_case__ ( unittest.TestCase ): A__ = [ '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ''' ''' i\'m going to throw up.\nand why is that?''' ] A__ = '''facebook/blenderbot_small-90M''' @cached_property def A_ ( self : int ) -> Union[str, Any]: '''simple docstring''' # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' ) @cached_property def A_ ( self : List[Any] ) -> Any: '''simple docstring''' __snake_case : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def A_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' __snake_case : Any = self.tokenizer(self.src_text , return_tensors='tf' ) __snake_case : Dict = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , ) __snake_case : Union[str, Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
0
'''simple docstring''' import math def a_ ( _UpperCAmelCase : int ) -> list: __snake_case : Optional[Any] = [True] * n __snake_case : Optional[int] = False __snake_case : Dict = False __snake_case : List[Any] = True for i in range(3 ,int(n**0.5 + 1 ) ,2 ): __snake_case : Optional[int] = i * 2 while index < n: __snake_case : Union[str, Any] = False __snake_case : int = index + i __snake_case : Dict = [2] for i in range(3 ,_UpperCAmelCase ,2 ): if is_prime[i]: primes.append(_UpperCAmelCase ) return primes def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int: __snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00 __snake_case : Tuple = prime_sieve(_UpperCAmelCase ) __snake_case : List[Any] = 0 __snake_case : List[Any] = 0 __snake_case : Optional[int] = primes[prime_index] while (last_prime**2) <= limit: __snake_case : Optional[int] = primes[prime_index + 1] __snake_case : Union[str, Any] = last_prime**2 __snake_case : Dict = next_prime**2 # Get numbers divisible by lps(current) __snake_case : Optional[Any] = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) __snake_case : Optional[Any] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps __snake_case : List[str] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair __snake_case : Dict = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
0
1
'''simple docstring''' from __future__ import annotations import unittest from transformers import XGLMConfig, XGLMTokenizer, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.xglm.modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, ) @require_tf class snake_case__ : A__ = XGLMConfig A__ = {} A__ = '''gelu''' def __init__( self : Optional[Any] , __a : Optional[Any] , __a : int=14 , __a : Dict=7 , __a : Optional[Any]=True , __a : Optional[int]=True , __a : List[Any]=True , __a : Optional[int]=99 , __a : Union[str, Any]=32 , __a : Union[str, Any]=2 , __a : List[str]=4 , __a : Optional[int]=37 , __a : List[Any]="gelu" , __a : Tuple=0.1 , __a : str=0.1 , __a : int=512 , __a : Tuple=0.0_2 , ) -> Union[str, Any]: '''simple docstring''' __snake_case : Any = parent __snake_case : List[Any] = batch_size __snake_case : List[Any] = seq_length __snake_case : str = is_training __snake_case : str = use_input_mask __snake_case : Tuple = use_labels __snake_case : Optional[int] = vocab_size __snake_case : Dict = d_model __snake_case : str = num_hidden_layers __snake_case : Dict = num_attention_heads __snake_case : Optional[int] = ffn_dim __snake_case : Tuple = activation_function __snake_case : List[Any] = activation_dropout __snake_case : str = attention_dropout __snake_case : Union[str, Any] = max_position_embeddings __snake_case : Optional[int] = initializer_range __snake_case : Any = None __snake_case : Any = 0 __snake_case : Dict = 2 __snake_case : Union[str, Any] = 1 def A_ ( self : Tuple ) -> Any: '''simple docstring''' return XGLMConfig.from_pretrained('facebook/xglm-564M' ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[int] = tf.clip_by_value( ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 ) __snake_case : List[str] = None if self.use_input_mask: __snake_case : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : int = self.get_config() __snake_case : List[str] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, input_mask, head_mask, ) def A_ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' return XGLMConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__a , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__a , ) def A_ ( self : List[Any] ) -> Dict: '''simple docstring''' __snake_case : List[str] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = config_and_inputs __snake_case : Union[str, Any] = { 'input_ids': input_ids, 'head_mask': head_mask, } return config, inputs_dict @require_tf class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else () A__ = (TFXGLMForCausalLM,) if is_tf_available() else () A__ = ( {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {} ) A__ = False A__ = False A__ = False def A_ ( self : Any ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = TFXGLMModelTester(self ) __snake_case : str = ConfigTester(self , config_class=__a , n_embd=37 ) def A_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() @slow def A_ ( self : Dict ) -> int: '''simple docstring''' for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : str = TFXGLMModel.from_pretrained(__a ) self.assertIsNotNone(__a ) @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' ) def A_ ( self : Any ) -> str: '''simple docstring''' super().test_resize_token_embeddings() @require_tf class snake_case__ ( unittest.TestCase ): @slow def A_ ( self : Optional[Any] , __a : Optional[Any]=True ) -> Any: '''simple docstring''' __snake_case : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __snake_case : Union[str, Any] = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other # fmt: off __snake_case : str = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581] # fmt: on __snake_case : int = model.generate(__a , do_sample=__a , num_beams=1 ) if verify_outputs: self.assertListEqual(output_ids[0].numpy().tolist() , __a ) @slow def A_ ( self : Any ) -> int: '''simple docstring''' __snake_case : Union[str, Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __snake_case : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) tf.random.set_seed(0 ) __snake_case : int = tokenizer('Today is a nice day and' , return_tensors='tf' ) __snake_case : Any = tokenized.input_ids # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices) with tf.device(':/CPU:0' ): __snake_case : Dict = model.generate(__a , do_sample=__a , seed=[7, 0] ) __snake_case : Any = tokenizer.decode(output_ids[0] , skip_special_tokens=__a ) __snake_case : str = ( 'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due' ) self.assertEqual(__a , __a ) @slow def A_ ( self : Tuple ) -> List[Any]: '''simple docstring''' __snake_case : Optional[int] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' ) __snake_case : Dict = XGLMTokenizer.from_pretrained('facebook/xglm-564M' ) __snake_case : int = 'left' # use different length sentences to test batching __snake_case : Optional[Any] = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When', 'Hello, my dog is a little', ] __snake_case : int = tokenizer(__a , return_tensors='tf' , padding=__a ) __snake_case : List[Any] = inputs['input_ids'] __snake_case : Tuple = model.generate(input_ids=__a , attention_mask=inputs['attention_mask'] , max_new_tokens=12 ) __snake_case : List[str] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids __snake_case : List[Any] = model.generate(input_ids=__a , max_new_tokens=12 ) __snake_case : List[str] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids __snake_case : List[str] = model.generate(input_ids=__a , max_new_tokens=12 ) __snake_case : Optional[int] = tokenizer.batch_decode(__a , skip_special_tokens=__a ) __snake_case : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a ) __snake_case : List[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=__a ) __snake_case : int = [ 'This is an extremelly long sentence that only exists to test the ability of the model to cope with ' 'left-padding, such as in batched generation. The output for the sequence below should be the same ' 'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be ' 'a single', 'Hello, my dog is a little bit of a shy one, but he is very friendly', ] self.assertListEqual(__a , __a ) self.assertListEqual(__a , [non_padded_sentence, padded_sentence] )
0
'''simple docstring''' def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float: return price * (1 + tax_rate) if __name__ == "__main__": print(F"""{price_plus_tax(1_0_0, 0.25) = }""") print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> int: assert isinstance(_UpperCAmelCase ,_UpperCAmelCase ), f'''The input value of [n={number}] is not an integer''' if number == 1: return 2 elif number < 1: __snake_case : Optional[Any] = f'''The input value of [n={number}] has to be > 0''' raise ValueError(_UpperCAmelCase ) else: __snake_case : Optional[int] = sylvester(number - 1 ) __snake_case : Tuple = num - 1 __snake_case : Dict = num return lower * upper + 1 if __name__ == "__main__": print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
0
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : List[Any] ) -> int: '''simple docstring''' __snake_case : Optional[int] = SMALL_MODEL_IDENTIFIER __snake_case : str = 'pt' __snake_case : Union[str, Any] = 'tf' def A_ ( self : Dict , __a : Tuple ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(__a ) def A_ ( self : Any , __a : Optional[Any] ) -> Dict: '''simple docstring''' __snake_case : Union[str, Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__a ) model_tf.save_pretrained(__a ) def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Tuple = 'mock_framework' # Framework provided - return whatever the user provides __snake_case : int = FeaturesManager.determine_framework(self.test_model , __a ) self.assertEqual(__a , __a ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__a ) __snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a ) self.assertEqual(__a , __a ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__a ) __snake_case : Tuple = FeaturesManager.determine_framework(__a , __a ) self.assertEqual(__a , __a ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__a ) __snake_case : Tuple = FeaturesManager.determine_framework(__a ) self.assertEqual(__a , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__a ) __snake_case : Union[str, Any] = FeaturesManager.determine_framework(__a ) self.assertEqual(__a , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(__a ): __snake_case : Optional[int] = FeaturesManager.determine_framework(__a ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Union[str, Any] = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ): __snake_case : int = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_pt ) # PyTorch not in environment -> use TensorFlow __snake_case : Tuple = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_torch_available' , __a ): __snake_case : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_tf ) # Both in environment -> use PyTorch __snake_case : Optional[Any] = MagicMock(return_value=__a ) __snake_case : Tuple = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ), patch( 'transformers.onnx.features.is_torch_available' , __a ): __snake_case : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_pt ) # Both not in environment -> raise error __snake_case : str = MagicMock(return_value=__a ) __snake_case : List[Any] = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ), patch( 'transformers.onnx.features.is_torch_available' , __a ): with self.assertRaises(__a ): __snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
0
1
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging A__ : List[str] = logging.get_logger(__name__) A__ : str = { '''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''', '''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''', '''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''', '''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''', '''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''', '''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''', } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''bloom''' A__ = ['''past_key_values'''] A__ = { '''num_hidden_layers''': '''n_layer''', '''num_attention_heads''': '''n_head''', } def __init__( self : Optional[int] , __a : Optional[Any]=250880 , __a : Union[str, Any]=64 , __a : Any=2 , __a : Optional[int]=8 , __a : List[Any]=1e-5 , __a : List[Any]=0.0_2 , __a : Union[str, Any]=True , __a : Dict=1 , __a : str=2 , __a : Optional[int]=False , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : Optional[int]=1 , __a : Tuple=False , **__a : Optional[int] , ) -> Optional[Any]: '''simple docstring''' __snake_case : str = vocab_size # Backward compatibility with n_embed kwarg __snake_case : str = kwargs.pop('n_embed' , __a ) __snake_case : Any = hidden_size if n_embed is None else n_embed __snake_case : Any = n_layer __snake_case : Union[str, Any] = n_head __snake_case : Optional[Any] = layer_norm_epsilon __snake_case : List[str] = initializer_range __snake_case : Dict = use_cache __snake_case : Union[str, Any] = pretraining_tp __snake_case : str = apply_residual_connection_post_layernorm __snake_case : Union[str, Any] = hidden_dropout __snake_case : Tuple = attention_dropout __snake_case : Dict = bos_token_id __snake_case : Optional[Any] = eos_token_id __snake_case : Any = slow_but_exact super().__init__(bos_token_id=__a , eos_token_id=__a , **__a ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = version.parse('''1.12''' ) def __init__( self : List[Any] , __a : PretrainedConfig , __a : str = "default" , __a : List[PatchingSpec] = None , __a : bool = False , ) -> Optional[int]: '''simple docstring''' super().__init__(__a , task=__a , patching_specs=__a , use_past=__a ) if not getattr(self._config , 'pad_token_id' , __a ): # TODO: how to do that better? __snake_case : Optional[int] = 0 @property def A_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __snake_case : int = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(__a , direction='inputs' , inverted_values_shape=__a ) __snake_case : str = {0: 'batch', 1: 'past_sequence + sequence'} else: __snake_case : Optional[Any] = {0: 'batch', 1: 'sequence'} return common_inputs @property def A_ ( self : Tuple ) -> int: '''simple docstring''' return self._config.n_layer @property def A_ ( self : Any ) -> int: '''simple docstring''' return self._config.n_head @property def A_ ( self : Tuple ) -> float: '''simple docstring''' return 1e-3 def A_ ( self : Tuple , __a : "PreTrainedTokenizer" , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional["TensorType"] = None , ) -> Mapping[str, Any]: '''simple docstring''' __snake_case : List[Any] = super(__a , self ).generate_dummy_inputs( __a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a ) # We need to order the input in the way they appears in the forward() __snake_case : Any = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __snake_case , __snake_case : str = common_inputs['input_ids'].shape # Not using the same length for past_key_values __snake_case : Tuple = seqlen + 2 __snake_case : str = self._config.hidden_size // self.num_attention_heads __snake_case : Dict = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) __snake_case : List[Any] = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) __snake_case : List[Any] = [ (torch.zeros(__a ), torch.zeros(__a )) for _ in range(self.num_layers ) ] __snake_case : int = common_inputs['attention_mask'] if self.use_past: __snake_case : int = ordered_inputs['attention_mask'].dtype __snake_case : Optional[int] = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__a , __a , dtype=__a )] , dim=1 ) return ordered_inputs @property def A_ ( self : Optional[int] ) -> int: '''simple docstring''' return 13
0
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ProphetNetTokenizer A__ = False def A_ ( self : Optional[int] ) -> Dict: '''simple docstring''' super().setUp() __snake_case : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : int , __a : Union[str, Any] ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = 'UNwant\u00E9d,running' __snake_case : List[str] = 'unwanted, running' return input_text, output_text def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Dict = self.tokenizer_class(self.vocab_file ) __snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def A_ ( self : int ) -> Any: '''simple docstring''' __snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Any ) -> List[str]: '''simple docstring''' __snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Optional[int] ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def A_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' __snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __snake_case : List[Any] = {} for i, token in enumerate(__a ): __snake_case : List[str] = i __snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def A_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' __snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] __snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' ) self.assertIsInstance(__a , __a ) __snake_case : int = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__a , __a ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def A_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a ) __snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a ) __snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a ) __snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
0
1
'''simple docstring''' print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : Optional[Any] = { '''configuration_nllb_moe''': [ '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NllbMoeConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NllbMoeForConditionalGeneration''', '''NllbMoeModel''', '''NllbMoePreTrainedModel''', '''NllbMoeTop2Router''', '''NllbMoeSparseMLP''', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
1
'''simple docstring''' import warnings from ...utils import logging from .image_processing_poolformer import PoolFormerImageProcessor A__ : int = logging.get_logger(__name__) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Tuple , *__a : Optional[int] , **__a : int ) -> None: '''simple docstring''' warnings.warn( 'The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use PoolFormerImageProcessor instead.' , __a , ) super().__init__(*__a , **__a )
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('The given input must be positive' ) # get the generated string sequence __snake_case : Optional[Any] = gray_code_sequence_string(_UpperCAmelCase ) # # convert them to integers for i in range(len(_UpperCAmelCase ) ): __snake_case : Optional[Any] = int(sequence[i] ,2 ) return sequence def a_ ( _UpperCAmelCase : int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __snake_case : Dict = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __snake_case : Dict = gray_code_sequence_string(bit_count - 1 ) __snake_case : Any = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __snake_case : str = '0' + smaller_sequence[i] sequence.append(_UpperCAmelCase ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __snake_case : Any = '1' + smaller_sequence[i] sequence.append(_UpperCAmelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
0
1
'''simple docstring''' import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class snake_case__ : def __init__( self : List[str] , __a : int , __a : str=3 , __a : List[str]=7 , __a : Tuple=True , __a : Optional[int]=True , __a : Union[str, Any]=False , __a : Optional[int]=True , __a : Optional[int]=99 , __a : Dict=32 , __a : Union[str, Any]=5 , __a : List[str]=4 , __a : int=37 , __a : Tuple="gelu" , __a : Any=0.1 , __a : Union[str, Any]=0.1 , __a : Optional[int]=512 , __a : Tuple=16 , __a : Dict=2 , __a : Optional[int]=0.0_2 , __a : List[str]=3 , __a : Dict=4 , __a : List[str]=None , ) -> Optional[int]: '''simple docstring''' __snake_case : int = parent __snake_case : Optional[int] = batch_size __snake_case : Dict = seq_length __snake_case : Any = is_training __snake_case : Tuple = use_input_mask __snake_case : List[str] = use_token_type_ids __snake_case : Tuple = use_labels __snake_case : int = vocab_size __snake_case : Optional[Any] = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : List[Any] = hidden_act __snake_case : Dict = hidden_dropout_prob __snake_case : List[Any] = attention_probs_dropout_prob __snake_case : Optional[Any] = max_position_embeddings __snake_case : List[Any] = type_vocab_size __snake_case : Optional[Any] = type_sequence_label_size __snake_case : Dict = initializer_range __snake_case : Tuple = num_labels __snake_case : int = num_choices __snake_case : Any = scope def A_ ( self : Optional[Any] ) -> int: '''simple docstring''' __snake_case : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Any = None if self.use_input_mask: __snake_case : Any = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Any = None __snake_case : int = None __snake_case : int = None __snake_case : Any = None if self.use_labels: __snake_case : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A_ ( self : Dict ) -> List[str]: '''simple docstring''' return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=__a , ) def A_ ( self : Tuple , __a : Optional[int] , __a : Optional[Any] , __a : Dict , __a : str , __a : Optional[int] , __a : Tuple , __a : Optional[Any] ) -> Any: '''simple docstring''' __snake_case : Any = FalconModel(config=__a ) model.to(__a ) model.eval() __snake_case : Union[str, Any] = model(__a , attention_mask=__a ) __snake_case : str = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self : Dict , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : Any , __a : List[Any] , __a : Optional[int] , __a : Tuple , __a : List[str] , ) -> List[str]: '''simple docstring''' __snake_case : List[str] = True __snake_case : Any = FalconModel(__a ) model.to(__a ) model.eval() __snake_case : Dict = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , ) __snake_case : Optional[int] = model( __a , attention_mask=__a , encoder_hidden_states=__a , ) __snake_case : str = model(__a , attention_mask=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A_ ( self : str , __a : Optional[Any] , __a : Any , __a : int , __a : Union[str, Any] , __a : Optional[int] , __a : int , __a : Optional[int] , __a : Union[str, Any] , __a : Union[str, Any] , ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = FalconForCausalLM(config=__a ) model.to(__a ) model.eval() __snake_case : str = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A_ ( self : Union[str, Any] , __a : Tuple , __a : List[Any] , __a : int , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any] , __a : Any , __a : Any , __a : Tuple , ) -> int: '''simple docstring''' __snake_case : Union[str, Any] = True __snake_case : int = True __snake_case : Optional[int] = FalconForCausalLM(config=__a ) model.to(__a ) model.eval() # first forward pass __snake_case : Optional[Any] = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , ) __snake_case : Any = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __snake_case : str = ids_tensor((self.batch_size, 3) , config.vocab_size ) __snake_case : str = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __snake_case : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) __snake_case : List[str] = torch.cat([input_mask, next_mask] , dim=-1 ) __snake_case : Dict = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )['hidden_states'][0] __snake_case : Union[str, Any] = model( __a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )['hidden_states'][0] # select random slice __snake_case : str = ids_tensor((1,) , output_from_past.shape[-1] ).item() __snake_case : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach() __snake_case : Dict = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) ) def A_ ( self : Optional[Any] ) -> int: '''simple docstring''' __snake_case : Union[str, Any] = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : List[Any] = config_and_inputs __snake_case : Any = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) A__ = (FalconForCausalLM,) if is_torch_available() else () A__ = ( { '''feature-extraction''': FalconModel, '''text-classification''': FalconForSequenceClassification, '''text-generation''': FalconForCausalLM, '''question-answering''': FalconForQuestionAnswering, '''token-classification''': FalconForTokenClassification, '''zero-shot''': FalconForSequenceClassification, } if is_torch_available() else {} ) A__ = False A__ = False def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = FalconModelTester(self ) __snake_case : List[Any] = ConfigTester(self , config_class=__a , hidden_size=37 ) def A_ ( self : Any ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case , *__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: __snake_case : List[Any] = alibi self.model_tester.create_and_check_model(__a , *__a ) def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Optional[Any] = 3 __snake_case : str = input_dict['input_ids'] __snake_case : Dict = input_ids.ne(1 ).to(__a ) __snake_case : Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case : Any = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() __snake_case : List[str] = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' __snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : str = 3 __snake_case : int = 'single_label_classification' __snake_case : Tuple = input_dict['input_ids'] __snake_case : Dict = input_ids.ne(1 ).to(__a ) __snake_case : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) __snake_case : List[Any] = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() __snake_case : Dict = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A_ ( self : int ) -> Optional[int]: '''simple docstring''' __snake_case , __snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : List[Any] = input_dict['input_ids'] __snake_case : Union[str, Any] = FalconForCausalLM(__a ) model.to(__a ) model.eval() __snake_case : List[Any] = model(__a , use_cache=__a ) __snake_case : int = input_ids.shape[0] __snake_case : Tuple = model._convert_to_rw_cache(result.past_key_values ) __snake_case : Any = model._convert_cache_to_standard_format(__a , __a ) for layer in range(len(__a ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def A_ ( self : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Union[str, Any] = 3 __snake_case : Optional[Any] = 'multi_label_classification' __snake_case : str = input_dict['input_ids'] __snake_case : str = input_ids.ne(1 ).to(__a ) __snake_case : Union[str, Any] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) __snake_case : str = FalconForSequenceClassification(__a ) model.to(__a ) model.eval() __snake_case : str = model(__a , attention_mask=__a , labels=__a ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def A_ ( self : Tuple ) -> int: '''simple docstring''' # Falcon can have different numbers of KV-heads than the number of query heads, so we need # to override this test to use the right head counts. for model_class in self.all_generative_model_classes: __snake_case , __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(__a , 'use_cache' ): return __snake_case : Dict = model_class(__a ).to(__a ) if "use_cache" not in inputs: __snake_case : str = True __snake_case : Optional[Any] = model(**__a ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return __snake_case : Tuple = ( getattr(__a , 'decoder_layers' , __a ) or getattr(__a , 'num_decoder_layers' , __a ) or config.num_hidden_layers ) __snake_case : Optional[int] = getattr(__a , 'num_kv_heads' , config.num_attention_heads ) __snake_case : List[str] = getattr(__a , 'd_model' , config.hidden_size ) __snake_case : List[str] = embed_dim // num_attention_heads __snake_case : Optional[int] = outputs['past_key_values'] self.assertEqual(len(__a ) , __a ) __snake_case , __snake_case : int = inputs['input_ids'].shape for i in range(__a ): if config.new_decoder_architecture: __snake_case : str = config.num_attention_heads elif config.multi_query: __snake_case : Optional[Any] = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def A_ ( self : str ) -> str: '''simple docstring''' __snake_case : Dict = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' ) __snake_case : Union[str, Any] = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' ) model.eval() model.to(__a ) __snake_case : int = tokenizer('My favorite food is' , return_tensors='pt' ).to(__a ) __snake_case : List[str] = ( 'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.' ) __snake_case : int = model.generate(**__a , do_sample=__a , max_new_tokens=19 ) __snake_case : Union[str, Any] = tokenizer.batch_decode(__a )[0] self.assertEqual(__a , __a ) @slow def A_ ( self : int ) -> int: '''simple docstring''' # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: __snake_case : Optional[int] = AutoTokenizer.from_pretrained(__a ) __snake_case : str = FalconForCausalLM.from_pretrained(__a ) model.eval() model.to(__a ) __snake_case : int = tokenizer('My favorite food is' , return_tensors='pt' ).to(__a ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**__a , do_sample=__a , max_new_tokens=4 ) model.generate(**__a , do_sample=__a , max_new_tokens=4 ) model.generate(**__a , num_beams=2 , max_new_tokens=4 ) @slow def A_ ( self : List[Any] ) -> int: '''simple docstring''' # The big models are way too big for the CI, so we use tiny random models that resemble their # architectures but with much smaller and fewer layers with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: __snake_case : Union[str, Any] = AutoTokenizer.from_pretrained(__a ) __snake_case : List[str] = FalconForCausalLM.from_pretrained(__a ) model.eval() model.to(device=__a ) __snake_case : Optional[int] = tokenizer('My favorite food is' , return_tensors='pt' ).to(__a ) # Test results are the same with and without cache __snake_case : Dict = model.generate(**__a , do_sample=__a , max_new_tokens=20 , use_cache=__a ) __snake_case : str = model.generate(**__a , do_sample=__a , max_new_tokens=20 , use_cache=__a ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class snake_case__ ( unittest.TestCase ): def A_ ( self : int ) -> List[Any]: '''simple docstring''' __snake_case : Any = tempfile.mkdtemp() # fmt: off __snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) __snake_case : List[str] = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } __snake_case : Optional[Any] = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__a , __a ) def A_ ( self : Optional[int] , **__a : Dict ) -> int: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : int , **__a : Dict ) -> Tuple: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __snake_case : List[str] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : Dict = self.get_image_processor() __snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) processor.save_pretrained(self.tmpdirname ) __snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : Optional[Any] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 ) __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = self.prepare_image_inputs() __snake_case : List[str] = image_processor(__a , return_tensors='np' ) __snake_case : List[str] = processor(images=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Optional[int] = 'lower newer' __snake_case : Dict = processor(text=__a ) __snake_case : List[Any] = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : List[Any] = 'lower newer' __snake_case : Optional[Any] = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(__a ): processor() def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Union[str, Any] = self.get_image_processor() __snake_case : Any = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __snake_case : int = processor.batch_decode(__a ) __snake_case : Optional[Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def A_ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = self.get_image_processor() __snake_case : Dict = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Union[str, Any] = 'lower newer' __snake_case : Tuple = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
0
1
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax A__ : int = logging.get_logger(__name__) @add_end_docstrings(SCREAMING_SNAKE_CASE_ ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Dict , **__a : Any ) -> Optional[Any]: '''simple docstring''' super().__init__(**__a ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self : Optional[Any] , __a : Union[str, List[str], "Image", List["Image"]] , **__a : Dict ) -> Optional[int]: '''simple docstring''' return super().__call__(__a , **__a ) def A_ ( self : List[Any] , **__a : Optional[int] ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = {} if "candidate_labels" in kwargs: __snake_case : str = kwargs['candidate_labels'] if "hypothesis_template" in kwargs: __snake_case : List[Any] = kwargs['hypothesis_template'] return preprocess_params, {}, {} def A_ ( self : Union[str, Any] , __a : Dict , __a : List[Any]=None , __a : Any="This is a photo of {}." ) -> Any: '''simple docstring''' __snake_case : Any = load_image(__a ) __snake_case : Optional[Any] = self.image_processor(images=[image] , return_tensors=self.framework ) __snake_case : int = candidate_labels __snake_case : str = [hypothesis_template.format(__a ) for x in candidate_labels] __snake_case : Any = self.tokenizer(__a , return_tensors=self.framework , padding=__a ) __snake_case : Tuple = [text_inputs] return inputs def A_ ( self : Dict , __a : int ) -> int: '''simple docstring''' __snake_case : int = model_inputs.pop('candidate_labels' ) __snake_case : Optional[int] = model_inputs.pop('text_inputs' ) if isinstance(text_inputs[0] , __a ): __snake_case : List[Any] = text_inputs[0] else: # Batching case. __snake_case : List[Any] = text_inputs[0][0] __snake_case : Optional[int] = self.model(**__a , **__a ) __snake_case : Tuple = { 'candidate_labels': candidate_labels, 'logits': outputs.logits_per_image, } return model_outputs def A_ ( self : Tuple , __a : List[str] ) -> Dict: '''simple docstring''' __snake_case : int = model_outputs.pop('candidate_labels' ) __snake_case : Optional[int] = model_outputs['logits'][0] if self.framework == "pt": __snake_case : Optional[Any] = logits.softmax(dim=-1 ).squeeze(-1 ) __snake_case : Any = probs.tolist() if not isinstance(__a , __a ): __snake_case : List[Any] = [scores] elif self.framework == "tf": __snake_case : List[str] = stable_softmax(__a , axis=-1 ) __snake_case : Tuple = probs.numpy().tolist() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) __snake_case : str = [ {'score': score, 'label': candidate_label} for score, candidate_label in sorted(zip(__a , __a ) , key=lambda __a : -x[0] ) ] return result
0
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def a_ ( _UpperCAmelCase : List[Any] ) -> Tuple: __snake_case : str = [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ) -> List[str]: __snake_case : Tuple = [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def a_ ( _UpperCAmelCase : Union[str, Any] ) -> Dict: __snake_case : Union[str, Any] = [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def a_ ( ) -> Optional[Any]: __snake_case : Any = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ) -> Tuple: __snake_case : List[str] = 'imagenet-1k-id2label.json' __snake_case : Dict = 10_00 __snake_case : Union[str, Any] = 'huggingface/label-files' __snake_case : str = num_labels __snake_case : str = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ) ,'r' ) ) __snake_case : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __snake_case : Optional[Any] = idalabel __snake_case : str = {v: k for k, v in idalabel.items()} __snake_case : Dict = CvtConfig(num_labels=_UpperCAmelCase ,idalabel=_UpperCAmelCase ,labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' ,1 )[-1][4:6] == "13": __snake_case : Tuple = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' ,1 )[-1][4:6] == "21": __snake_case : str = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __snake_case : Dict = [2, 2, 20] __snake_case : Any = [3, 12, 16] __snake_case : Tuple = [1_92, 7_68, 10_24] __snake_case : str = CvtForImageClassification(_UpperCAmelCase ) __snake_case : List[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) __snake_case : int = image_size __snake_case : int = torch.load(_UpperCAmelCase ,map_location=torch.device('cpu' ) ) __snake_case : List[Any] = OrderedDict() __snake_case : Union[str, Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __snake_case : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase ) __snake_case : Tuple = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __snake_case : Optional[int] = list_of_state_dict + attention(_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : str = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __snake_case : List[str] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": A__ : Dict = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=3_8_4, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) A__ : Tuple = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
0
1
'''simple docstring''' import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = DiTPipeline A__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS A__ = PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } A__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS A__ = False def A_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : List[Any] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__a , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=__a , ) __snake_case : Dict = AutoencoderKL() __snake_case : Tuple = DDIMScheduler() __snake_case : int = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def A_ ( self : List[Any] , __a : Optional[int] , __a : Tuple=0 ) -> Union[str, Any]: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : Dict = torch.manual_seed(__a ) else: __snake_case : Any = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : int = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def A_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' __snake_case : Optional[Any] = 'cpu' __snake_case : Union[str, Any] = self.get_dummy_components() __snake_case : List[Any] = self.pipeline_class(**__a ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : List[Any] = self.get_dummy_inputs(__a ) __snake_case : Optional[int] = pipe(**__a ).images __snake_case : Optional[Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) __snake_case : Union[str, Any] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] ) __snake_case : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__a , 1e-3 ) def A_ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=__a , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class snake_case__ ( unittest.TestCase ): def A_ ( self : List[Any] ) -> int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : Dict ) -> Any: '''simple docstring''' __snake_case : Optional[int] = torch.manual_seed(0 ) __snake_case : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) pipe.to('cuda' ) __snake_case : Dict = ['vase', 'umbrella', 'white shark', 'white wolf'] __snake_case : Any = pipe.get_label_ids(__a ) __snake_case : str = pipe(__a , generator=__a , num_inference_steps=40 , output_type='np' ).images for word, image in zip(__a , __a ): __snake_case : List[str] = load_numpy( f'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-2 def A_ ( self : int ) -> str: '''simple docstring''' __snake_case : List[Any] = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) __snake_case : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) __snake_case : List[str] = ['vase', 'umbrella'] __snake_case : int = pipe.get_label_ids(__a ) __snake_case : Dict = torch.manual_seed(0 ) __snake_case : Dict = pipe(__a , generator=__a , num_inference_steps=25 , output_type='np' ).images for word, image in zip(__a , __a ): __snake_case : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'''/dit/{word}_512.npy''' ) assert np.abs((expected_image - image).max() ) < 1e-1
0
'''simple docstring''' from __future__ import annotations A__ : List[Any] = list[list[int]] # assigning initial values to the grid A__ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution A__ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( _UpperCAmelCase : Matrix ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( _UpperCAmelCase : Matrix ) -> Matrix | None: if location := find_empty_location(_UpperCAmelCase ): __snake_case , __snake_case : Optional[int] = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 ,10 ): if is_safe(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ): __snake_case : Union[str, Any] = digit if sudoku(_UpperCAmelCase ) is not None: return grid __snake_case : Optional[Any] = 0 return None def a_ ( _UpperCAmelCase : Matrix ) -> None: for row in grid: for cell in row: print(_UpperCAmelCase ,end=' ' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 2_0) print_solution(example_grid) print('''\nExample grid solution:''') A__ : List[str] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
0
1
'''simple docstring''' from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract A__ : int = logging.get_logger(__name__) def a_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Union[str, Any] ) -> int: return [ int(10_00 * (box[0] / width) ), int(10_00 * (box[1] / height) ), int(10_00 * (box[2] / width) ), int(10_00 * (box[3] / height) ), ] def a_ ( _UpperCAmelCase : np.ndarray ,_UpperCAmelCase : Optional[str] ,_UpperCAmelCase : Optional[str] = None ) -> List[Any]: __snake_case : int = tesseract_config if tesseract_config is not None else '' # apply OCR __snake_case : List[str] = to_pil_image(_UpperCAmelCase ) __snake_case , __snake_case : str = pil_image.size __snake_case : Optional[Any] = pytesseract.image_to_data(_UpperCAmelCase ,lang=_UpperCAmelCase ,output_type='dict' ,config=_UpperCAmelCase ) __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = data['text'], data['left'], data['top'], data['width'], data['height'] # filter empty words and corresponding coordinates __snake_case : Dict = [idx for idx, word in enumerate(_UpperCAmelCase ) if not word.strip()] __snake_case : Tuple = [word for idx, word in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices] __snake_case : Any = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices] __snake_case : int = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices] __snake_case : Union[str, Any] = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices] __snake_case : List[Any] = [coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format __snake_case : Tuple = [] for x, y, w, h in zip(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ): __snake_case : Any = [x, y, x + w, y + h] actual_boxes.append(_UpperCAmelCase ) # finally, normalize the bounding boxes __snake_case : Optional[Any] = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ) assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), "Not as many words as there are bounding boxes" return words, normalized_boxes class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = ['''pixel_values'''] def __init__( self : Any , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : Optional[str] = None , __a : Optional[str] = "" , **__a : List[Any] , ) -> None: '''simple docstring''' super().__init__(**__a ) __snake_case : List[Any] = size if size is not None else {'height': 224, 'width': 224} __snake_case : List[Any] = get_size_dict(__a ) __snake_case : List[Any] = do_resize __snake_case : Dict = size __snake_case : Dict = resample __snake_case : Dict = apply_ocr __snake_case : List[Any] = ocr_lang __snake_case : List[str] = tesseract_config def A_ ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ) -> np.ndarray: '''simple docstring''' __snake_case : Union[str, Any] = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) __snake_case : Dict = (size['height'], size['width']) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def A_ ( self : Optional[Any] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Optional[str] = None , __a : Optional[str] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : Any , ) -> PIL.Image.Image: '''simple docstring''' __snake_case : int = do_resize if do_resize is not None else self.do_resize __snake_case : Optional[int] = size if size is not None else self.size __snake_case : Tuple = get_size_dict(__a ) __snake_case : List[Any] = resample if resample is not None else self.resample __snake_case : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr __snake_case : Any = ocr_lang if ocr_lang is not None else self.ocr_lang __snake_case : Tuple = tesseract_config if tesseract_config is not None else self.tesseract_config __snake_case : Tuple = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) # All transformations expect numpy arrays. __snake_case : List[Any] = [to_numpy_array(__a ) for image in images] if apply_ocr: requires_backends(self , 'pytesseract' ) __snake_case : List[Any] = [] __snake_case : Union[str, Any] = [] for image in images: __snake_case , __snake_case : Optional[Any] = apply_tesseract(__a , __a , __a ) words_batch.append(__a ) boxes_batch.append(__a ) if do_resize: __snake_case : Any = [self.resize(image=__a , size=__a , resample=__a ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) __snake_case : List[Any] = [flip_channel_order(__a ) for image in images] __snake_case : Dict = [to_channel_dimension_format(__a , __a ) for image in images] __snake_case : Optional[Any] = BatchFeature(data={'pixel_values': images} , tensor_type=__a ) if apply_ocr: __snake_case : int = words_batch __snake_case : List[str] = boxes_batch return data
0
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = KandinskyVaaPriorPipeline A__ = ['''prompt'''] A__ = ['''prompt''', '''negative_prompt'''] A__ = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Dict ) -> List[str]: '''simple docstring''' return 32 @property def A_ ( self : Any ) -> str: '''simple docstring''' return 32 @property def A_ ( self : str ) -> Optional[int]: '''simple docstring''' return self.time_input_dim @property def A_ ( self : str ) -> int: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return 100 @property def A_ ( self : Tuple ) -> List[str]: '''simple docstring''' __snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__a ) @property def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Any = { 'num_attention_heads': 2, 'attention_head_dim': 12, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } __snake_case : List[Any] = PriorTransformer(**__a ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __snake_case : Any = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __snake_case : Optional[Any] = CLIPVisionModelWithProjection(__a ) return model @property def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : Dict = CLIPImageProcessor( crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = self.dummy_prior __snake_case : List[str] = self.dummy_image_encoder __snake_case : str = self.dummy_text_encoder __snake_case : List[str] = self.dummy_tokenizer __snake_case : List[str] = self.dummy_image_processor __snake_case : Any = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=1_0.0 , ) __snake_case : str = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def A_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple=0 ) -> Any: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : List[str] = torch.manual_seed(__a ) else: __snake_case : List[str] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : List[Any] = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def A_ ( self : str ) -> Dict: '''simple docstring''' __snake_case : str = 'cpu' __snake_case : List[str] = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**__a ) __snake_case : Optional[Any] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : List[str] = output.image_embeds __snake_case : str = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] __snake_case : Union[str, Any] = image[0, -10:] __snake_case : Any = image_from_tuple[0, -10:] assert image.shape == (1, 32) __snake_case : List[Any] = np.array( [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def A_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = torch_device == 'cpu' __snake_case : Dict = True __snake_case : Union[str, Any] = False self._test_inference_batch_single_identical( test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , ) @skip_mps def A_ ( self : str ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = torch_device == 'cpu' __snake_case : Optional[Any] = False self._test_attention_slicing_forward_pass( test_max_difference=__a , test_mean_pixel_difference=__a , )
0
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class snake_case__ : @staticmethod def A_ ( *__a : Optional[Any] , **__a : Tuple ) -> Tuple: '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class snake_case__ ( unittest.TestCase ): A__ = MODEL_FOR_OBJECT_DETECTION_MAPPING def A_ ( self : Any , __a : int , __a : Union[str, Any] , __a : Dict ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = ObjectDetectionPipeline(model=__a , image_processor=__a ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def A_ ( self : str , __a : List[Any] , __a : Optional[int] ) -> str: '''simple docstring''' __snake_case : str = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 ) self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { 'score': ANY(__a ), 'label': ANY(__a ), 'box': {'xmin': ANY(__a ), 'ymin': ANY(__a ), 'xmax': ANY(__a ), 'ymax': ANY(__a )}, } , ) import datasets __snake_case : str = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' ) __snake_case : Any = [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] __snake_case : List[Any] = object_detector(__a , threshold=0.0 ) self.assertEqual(len(__a ) , len(__a ) ) for outputs in batch_outputs: self.assertGreater(len(__a ) , 0 ) for detected_object in outputs: self.assertEqual( __a , { 'score': ANY(__a ), 'label': ANY(__a ), 'box': {'xmin': ANY(__a ), 'ymin': ANY(__a ), 'xmax': ANY(__a ), 'ymax': ANY(__a )}, } , ) @require_tf @unittest.skip('Object detection not implemented in TF' ) def A_ ( self : Any ) -> List[str]: '''simple docstring''' pass @require_torch def A_ ( self : List[str] ) -> List[Any]: '''simple docstring''' __snake_case : int = 'hf-internal-testing/tiny-detr-mobilenetsv3' __snake_case : Any = AutoModelForObjectDetection.from_pretrained(__a ) __snake_case : str = AutoFeatureExtractor.from_pretrained(__a ) __snake_case : List[Any] = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) __snake_case : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ] , ) __snake_case : Dict = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ], [ {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, {'score': 0.3_3_7_6, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}}, ], ] , ) @require_torch @slow def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Dict = 'facebook/detr-resnet-50' __snake_case : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(__a ) __snake_case : Optional[int] = AutoFeatureExtractor.from_pretrained(__a ) __snake_case : Tuple = ObjectDetectionPipeline(model=__a , feature_extractor=__a ) __snake_case : Optional[int] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ] , ) __snake_case : Any = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], ] , ) @require_torch @slow def A_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' __snake_case : Tuple = 'facebook/detr-resnet-50' __snake_case : Optional[Any] = pipeline('object-detection' , model=__a ) __snake_case : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ] , ) __snake_case : List[str] = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], [ {'score': 0.9_9_8_2, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}}, {'score': 0.9_9_6_0, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}}, {'score': 0.9_9_5_5, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}}, {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ], ] , ) @require_torch @slow def A_ ( self : Tuple ) -> List[Any]: '''simple docstring''' __snake_case : str = 0.9_9_8_5 __snake_case : Dict = 'facebook/detr-resnet-50' __snake_case : Union[str, Any] = pipeline('object-detection' , model=__a ) __snake_case : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=__a ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {'score': 0.9_9_8_8, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}}, {'score': 0.9_9_8_7, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}, ] , ) @require_torch @require_pytesseract @slow def A_ ( self : List[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = 'Narsil/layoutlmv3-finetuned-funsd' __snake_case : Optional[int] = 0.9_9_9_3 __snake_case : Optional[int] = pipeline('object-detection' , model=__a , threshold=__a ) __snake_case : Tuple = object_detector( 'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' ) self.assertEqual( nested_simplify(__a , decimals=4 ) , [ {'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}}, {'score': 0.9_9_9_3, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}}, ] , )
0
'''simple docstring''' from math import factorial A__ : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)} def a_ ( _UpperCAmelCase : int ) -> int: if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): raise TypeError('Parameter number must be int' ) if number < 0: raise ValueError('Parameter number must be greater than or equal to 0' ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(_UpperCAmelCase ) ) def a_ ( _UpperCAmelCase : int = 60 ,_UpperCAmelCase : int = 1_00_00_00 ) -> int: if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): raise TypeError('Parameters chain_length and number_limit must be int' ) if chain_length <= 0 or number_limit <= 0: raise ValueError( 'Parameters chain_length and number_limit must be greater than 0' ) # the counter for the chains with the exact desired length __snake_case : List[str] = 0 # the cached sizes of the previous chains __snake_case : dict[int, int] = {} for start_chain_element in range(1 ,_UpperCAmelCase ): # The temporary set will contain the elements of the chain __snake_case : Optional[int] = set() __snake_case : List[Any] = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. __snake_case : str = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(_UpperCAmelCase ) chain_set_length += 1 __snake_case : Tuple = digit_factorial_sum(_UpperCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] __snake_case : Optional[Any] = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F"""{solution()}""")
0
1
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer A__ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A__ : List[Any] = { '''vocab_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt''' ), '''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''', '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json''' ), '''google/electra-base-generator''': ( '''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json''' ), '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json''' ), }, } A__ : List[Any] = { '''google/electra-small-generator''': 5_1_2, '''google/electra-base-generator''': 5_1_2, '''google/electra-large-generator''': 5_1_2, '''google/electra-small-discriminator''': 5_1_2, '''google/electra-base-discriminator''': 5_1_2, '''google/electra-large-discriminator''': 5_1_2, } A__ : Optional[Any] = { '''google/electra-small-generator''': {'''do_lower_case''': True}, '''google/electra-base-generator''': {'''do_lower_case''': True}, '''google/electra-large-generator''': {'''do_lower_case''': True}, '''google/electra-small-discriminator''': {'''do_lower_case''': True}, '''google/electra-base-discriminator''': {'''do_lower_case''': True}, '''google/electra-large-discriminator''': {'''do_lower_case''': True}, } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_INIT_CONFIGURATION A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ElectraTokenizer def __init__( self : int , __a : List[Any]=None , __a : int=None , __a : List[str]=True , __a : Any="[UNK]" , __a : Any="[SEP]" , __a : Union[str, Any]="[PAD]" , __a : Dict="[CLS]" , __a : List[Any]="[MASK]" , __a : str=True , __a : Optional[int]=None , **__a : Optional[int] , ) -> str: '''simple docstring''' super().__init__( __a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , ) __snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , __a ) != do_lower_case or normalizer_state.get('strip_accents' , __a ) != strip_accents or normalizer_state.get('handle_chinese_chars' , __a ) != tokenize_chinese_chars ): __snake_case : List[Any] = getattr(__a , normalizer_state.pop('type' ) ) __snake_case : str = do_lower_case __snake_case : Optional[int] = strip_accents __snake_case : Any = tokenize_chinese_chars __snake_case : Union[str, Any] = normalizer_class(**__a ) __snake_case : Any = do_lower_case def A_ ( self : Any , __a : List[str] , __a : Optional[Any]=None ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A_ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' __snake_case : int = [self.sep_token_id] __snake_case : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self : Optional[int] , __a : str , __a : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' __snake_case : Tuple = self._tokenizer.model.save(__a , name=__a ) return tuple(__a )
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int = 1_00 ) -> int: __snake_case : Any = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Union[str, Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
0
1
'''simple docstring''' import unittest import torch from torch import nn from diffusers.models.activations import get_activation class snake_case__ ( unittest.TestCase ): def A_ ( self : List[str] ) -> int: '''simple docstring''' __snake_case : Dict = get_activation('swish' ) self.assertIsInstance(__a , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def A_ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = get_activation('silu' ) self.assertIsInstance(__a , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Optional[Any] = get_activation('mish' ) self.assertIsInstance(__a , nn.Mish ) self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' __snake_case : Tuple = get_activation('gelu' ) self.assertIsInstance(__a , nn.GELU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A__ : int = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Tuple = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys A__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
1
'''simple docstring''' from __future__ import annotations from collections import deque class snake_case__ : def __init__( self : Union[str, Any] , __a : list[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : list[dict] = [] self.adlist.append( {'value': '', 'next_states': [], 'fail_state': 0, 'output': []} ) for keyword in keywords: self.add_keyword(__a ) self.set_fail_transitions() def A_ ( self : str , __a : int , __a : str ) -> int | None: '''simple docstring''' for state in self.adlist[current_state]["next_states"]: if char == self.adlist[state]["value"]: return state return None def A_ ( self : Tuple , __a : str ) -> None: '''simple docstring''' __snake_case : List[str] = 0 for character in keyword: __snake_case : Any = self.find_next_state(__a , __a ) if next_state is None: self.adlist.append( { 'value': character, 'next_states': [], 'fail_state': 0, 'output': [], } ) self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 ) __snake_case : Optional[int] = len(self.adlist ) - 1 else: __snake_case : List[str] = next_state self.adlist[current_state]["output"].append(__a ) def A_ ( self : List[str] ) -> None: '''simple docstring''' __snake_case : deque = deque() for node in self.adlist[0]["next_states"]: q.append(__a ) __snake_case : Optional[int] = 0 while q: __snake_case : Optional[int] = q.popleft() for child in self.adlist[r]["next_states"]: q.append(__a ) __snake_case : Optional[int] = self.adlist[r]['fail_state'] while ( self.find_next_state(__a , self.adlist[child]['value'] ) is None and state != 0 ): __snake_case : Optional[int] = self.adlist[state]['fail_state'] __snake_case : int = self.find_next_state( __a , self.adlist[child]['value'] ) if self.adlist[child]["fail_state"] is None: __snake_case : Optional[int] = 0 __snake_case : Dict = ( self.adlist[child]['output'] + self.adlist[self.adlist[child]['fail_state']]['output'] ) def A_ ( self : int , __a : str ) -> dict[str, list[int]]: '''simple docstring''' __snake_case : dict = {} # returns a dict with keywords and list of its occurrences __snake_case : Tuple = 0 for i in range(len(__a ) ): while ( self.find_next_state(__a , string[i] ) is None and current_state != 0 ): __snake_case : Dict = self.adlist[current_state]['fail_state'] __snake_case : Union[str, Any] = self.find_next_state(__a , string[i] ) if next_state is None: __snake_case : Union[str, Any] = 0 else: __snake_case : Optional[Any] = next_state for key in self.adlist[current_state]["output"]: if key not in result: __snake_case : Tuple = [] result[key].append(i - len(__a ) + 1 ) return result if __name__ == "__main__": import doctest doctest.testmod()
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ShapEPipeline A__ = ['''prompt'''] A__ = ['''prompt'''] A__ = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Optional[Any] ) -> str: '''simple docstring''' return 32 @property def A_ ( self : str ) -> Optional[int]: '''simple docstring''' return 32 @property def A_ ( self : Tuple ) -> List[Any]: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : Tuple ) -> Dict: '''simple docstring''' return 8 @property def A_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' __snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__a ) @property def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Dict = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __snake_case : Optional[Any] = PriorTransformer(**__a ) return model @property def A_ ( self : Dict ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Tuple = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __snake_case : Optional[int] = ShapERenderer(**__a ) return model def A_ ( self : Tuple ) -> Tuple: '''simple docstring''' __snake_case : Tuple = self.dummy_prior __snake_case : Union[str, Any] = self.dummy_text_encoder __snake_case : List[str] = self.dummy_tokenizer __snake_case : Optional[Any] = self.dummy_renderer __snake_case : List[Any] = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , ) __snake_case : int = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def A_ ( self : Union[str, Any] , __a : Dict , __a : int=0 ) -> Optional[Any]: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : List[str] = torch.manual_seed(__a ) else: __snake_case : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : Optional[int] = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def A_ ( self : List[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = 'cpu' __snake_case : Dict = self.get_dummy_components() __snake_case : int = self.pipeline_class(**__a ) __snake_case : str = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : Dict = output.images[0] __snake_case : int = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __snake_case : str = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A_ ( self : Any ) -> List[str]: '''simple docstring''' # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A_ ( self : int ) -> Tuple: '''simple docstring''' __snake_case : int = torch_device == 'cpu' __snake_case : str = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__a , relax_max_difference=__a , ) def A_ ( self : List[str] ) -> Dict: '''simple docstring''' __snake_case : str = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**__a ) __snake_case : Dict = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : int = 1 __snake_case : Tuple = 2 __snake_case : Tuple = self.get_dummy_inputs(__a ) for key in inputs.keys(): if key in self.batch_params: __snake_case : Union[str, Any] = batch_size * [inputs[key]] __snake_case : str = pipe(**__a , num_images_per_prompt=__a )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def A_ ( self : str ) -> Dict: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) __snake_case : Union[str, Any] = ShapEPipeline.from_pretrained('openai/shap-e' ) __snake_case : Any = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[int] = torch.Generator(device=__a ).manual_seed(0 ) __snake_case : Union[str, Any] = pipe( 'a shark' , generator=__a , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__a , __a )
0
1
'''simple docstring''' import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput A__ : List[str] = '''scheduler_config.json''' class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = 1 A__ = 2 A__ = 3 A__ = 4 A__ = 5 A__ = 6 A__ = 7 A__ = 8 A__ = 9 A__ = 10 A__ = 11 A__ = 12 A__ = 13 A__ = 14 @dataclass class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = 42 class snake_case__ : A__ = SCHEDULER_CONFIG_NAME A__ = [] A__ = True @classmethod def A_ ( cls : str , __a : Dict[str, Any] = None , __a : Optional[str] = None , __a : str=False , **__a : Tuple , ) -> List[str]: '''simple docstring''' __snake_case , __snake_case , __snake_case : int = cls.load_config( pretrained_model_name_or_path=__a , subfolder=__a , return_unused_kwargs=__a , return_commit_hash=__a , **__a , ) return cls.from_config(__a , return_unused_kwargs=__a , **__a ) def A_ ( self : Dict , __a : Union[str, os.PathLike] , __a : bool = False , **__a : Any ) -> List[str]: '''simple docstring''' self.save_config(save_directory=__a , push_to_hub=__a , **__a ) @property def A_ ( self : Tuple ) -> List[Any]: '''simple docstring''' return self._get_compatibles() @classmethod def A_ ( cls : List[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Union[str, Any] = list(set([cls.__name__] + cls._compatibles ) ) __snake_case : str = importlib.import_module(__name__.split('.' )[0] ) __snake_case : Dict = [ getattr(__a , __a ) for c in compatible_classes_str if hasattr(__a , __a ) ] return compatible_classes
0
'''simple docstring''' from __future__ import annotations import time import numpy as np A__ : str = [8, 5, 9, 7] A__ : List[str] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A__ : Dict = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class snake_case__ : def __init__( self : Union[str, Any] , __a : list[int] , __a : list[list[int]] , __a : list[list[int]] , ) -> None: '''simple docstring''' __snake_case : int = claim_vector __snake_case : Optional[int] = allocated_resources_table __snake_case : List[str] = maximum_claim_table def A_ ( self : str ) -> list[int]: '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def A_ ( self : int ) -> list[int]: '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def A_ ( self : int ) -> list[list[int]]: '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def A_ ( self : str ) -> dict[int, list[int]]: '''simple docstring''' return {self.__need().index(__a ): i for i in self.__need()} def A_ ( self : Union[str, Any] , **__a : int ) -> None: '''simple docstring''' __snake_case : str = self.__need() __snake_case : List[Any] = self.__allocated_resources_table __snake_case : Optional[int] = self.__available_resources() __snake_case : Union[str, Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: __snake_case : Tuple = False for each_need in need_list: __snake_case : Any = True for index, need in enumerate(__a ): if need > available_resources[index]: __snake_case : List[str] = False break if execution: __snake_case : Union[str, Any] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __snake_case : str = original_need_index print(f'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(__a ) # update available/freed resources stack __snake_case : Union[str, Any] = np.array(__a ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(__a ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( f'''P{self.__allocated_resources_table.index(__a ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( f'''P{self.__maximum_claim_table.index(__a ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(__a ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(__a ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : int = 50 ) -> int: __snake_case : Tuple = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 ,5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"""{solution() = }""")
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer A__ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A__ : List[Any] = { '''vocab_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt''' ), '''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''', '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json''' ), '''google/electra-base-generator''': ( '''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json''' ), '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json''' ), }, } A__ : List[Any] = { '''google/electra-small-generator''': 5_1_2, '''google/electra-base-generator''': 5_1_2, '''google/electra-large-generator''': 5_1_2, '''google/electra-small-discriminator''': 5_1_2, '''google/electra-base-discriminator''': 5_1_2, '''google/electra-large-discriminator''': 5_1_2, } A__ : Optional[Any] = { '''google/electra-small-generator''': {'''do_lower_case''': True}, '''google/electra-base-generator''': {'''do_lower_case''': True}, '''google/electra-large-generator''': {'''do_lower_case''': True}, '''google/electra-small-discriminator''': {'''do_lower_case''': True}, '''google/electra-base-discriminator''': {'''do_lower_case''': True}, '''google/electra-large-discriminator''': {'''do_lower_case''': True}, } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_INIT_CONFIGURATION A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ElectraTokenizer def __init__( self : int , __a : List[Any]=None , __a : int=None , __a : List[str]=True , __a : Any="[UNK]" , __a : Any="[SEP]" , __a : Union[str, Any]="[PAD]" , __a : Dict="[CLS]" , __a : List[Any]="[MASK]" , __a : str=True , __a : Optional[int]=None , **__a : Optional[int] , ) -> str: '''simple docstring''' super().__init__( __a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , ) __snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , __a ) != do_lower_case or normalizer_state.get('strip_accents' , __a ) != strip_accents or normalizer_state.get('handle_chinese_chars' , __a ) != tokenize_chinese_chars ): __snake_case : List[Any] = getattr(__a , normalizer_state.pop('type' ) ) __snake_case : str = do_lower_case __snake_case : Optional[int] = strip_accents __snake_case : Any = tokenize_chinese_chars __snake_case : Union[str, Any] = normalizer_class(**__a ) __snake_case : Any = do_lower_case def A_ ( self : Any , __a : List[str] , __a : Optional[Any]=None ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A_ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' __snake_case : int = [self.sep_token_id] __snake_case : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self : Optional[int] , __a : str , __a : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' __snake_case : Tuple = self._tokenizer.model.save(__a , name=__a ) return tuple(__a )
0
1
'''simple docstring''' import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values A__ : str = argparse.ArgumentParser() parser.add_argument('''--user''', type=str, default='''ubuntu''') parser.add_argument('''--host''', type=str, default='''localhost''') parser.add_argument('''--key_path''', type=str, default=None) parser.add_argument('''--instance''', type=str, default='''V100:1''') parser.add_argument('''--provider''', type=str, default='''cheapest''') parser.add_argument('''--use_spot''', type=bool, default=False) parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''') A__ , A__ : List[Any] = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('''Cannot specify both BYO and on-demand cluster args''') A__ : Optional[int] = rh.cluster( name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path} ) else: A__ : Optional[int] = rh.cluster( name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) A__ : int = args.example.rsplit('''/''', 1)[0] # Set up remote environment cluster.install_packages(['''pip:./''']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([F"""pip install -r transformers/examples/{example_dir}/requirements.txt"""]) cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117''']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([F"""python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}"""]) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> bool: __snake_case : Union[str, Any] = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(2_7)) print(perfect_cube(4))
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : int = 10 ,_UpperCAmelCase : int = 22 ) -> int: __snake_case : List[str] = range(1 ,_UpperCAmelCase ) __snake_case : Optional[int] = range(1 ,_UpperCAmelCase ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"""{solution(1_0, 2_2) = }""")
0
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss A__ : Tuple = pytest.mark.integration @require_faiss class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Dict = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(__a ) for x in np.arange(30 ).tolist()]} ) return dset def A_ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() __snake_case : Dict = dset.map( lambda __a , __a : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__a , keep_in_memory=__a ) __snake_case : List[Any] = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def A_ ( self : Tuple ) -> Any: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def A_ ( self : List[Any] ) -> Dict: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __snake_case , __snake_case : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def A_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(__a , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' from elasticsearch import Elasticsearch __snake_case : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __snake_case : Any = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __snake_case : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=__a ) __snake_case , __snake_case : str = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : str ) -> int: '''simple docstring''' import faiss __snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __snake_case : Dict = np.zeros(5 , dtype=np.floataa ) __snake_case : List[str] = 1 __snake_case , __snake_case : List[Any] = index.search(__a ) self.assertRaises(__a , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __snake_case : List[str] = np.eye(5 , dtype=np.floataa )[::-1] __snake_case , __snake_case : Dict = index.search_batch(__a ) self.assertRaises(__a , index.search_batch , queries[0] ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : List[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __a ) def A_ ( self : int ) -> int: '''simple docstring''' import faiss __snake_case : int = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __snake_case : List[str] = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__a ): __snake_case : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def A_ ( self : str ) -> Dict: '''simple docstring''' import faiss __snake_case : Tuple = faiss.IndexFlat(5 ) __snake_case : List[Any] = FaissIndex(custom_index=__a ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' import faiss __snake_case : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file: index.save(tmp_file.name ) __snake_case : List[Any] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __snake_case : List[Any] = np.zeros(5 , dtype=np.floataa ) __snake_case : Any = 1 __snake_case , __snake_case : int = index.search(__a ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def a_ ( _UpperCAmelCase : str ) -> Optional[int]: import faiss __snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 ,dtype=np.floataa ) ) __snake_case : Dict = 'index.faiss' __snake_case : Any = f'''mock://{index_name}''' index.save(_UpperCAmelCase ,storage_options=mockfs.storage_options ) __snake_case : Any = FaissIndex.load(_UpperCAmelCase ,storage_options=mockfs.storage_options ) __snake_case : Any = np.zeros(5 ,dtype=np.floataa ) __snake_case : Any = 1 __snake_case , __snake_case : Tuple = index.search(_UpperCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __snake_case : int = Elasticsearch() __snake_case : Dict = {'acknowledged': True} __snake_case : List[Any] = ElasticSearchIndex(es_client=__a ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __snake_case : Optional[Any] = 'foo' __snake_case : int = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case : List[Any] = index.search(__a ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __snake_case : Dict = 'foo' __snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case : Optional[Any] = index.search(__a , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __snake_case : List[Any] = ['foo', 'bar', 'foobar'] __snake_case : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case : Any = index.search_batch(__a ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([1, 1, 1] , __a ) # batched queries with timeout __snake_case : Tuple = ['foo', 'bar', 'foobar'] __snake_case : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case : int = index.search_batch(__a , request_timeout=30 ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : Dict = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([1, 1, 1] , __a )
0
1
'''simple docstring''' import tempfile import torch from diffusers import ( DEISMultistepScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, UniPCMultistepScheduler, ) from .test_schedulers import SchedulerCommonTest class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = (DEISMultistepScheduler,) A__ = (('''num_inference_steps''', 25),) def A_ ( self : Tuple , **__a : Dict ) -> List[Any]: '''simple docstring''' __snake_case : int = { 'num_train_timesteps': 1000, 'beta_start': 0.0_0_0_1, 'beta_end': 0.0_2, 'beta_schedule': 'linear', 'solver_order': 2, } config.update(**__a ) return config def A_ ( self : List[str] , __a : List[str]=0 , **__a : List[str] ) -> List[Any]: '''simple docstring''' __snake_case : Any = dict(self.forward_default_kwargs ) __snake_case : int = kwargs.pop('num_inference_steps' , __a ) __snake_case : List[Any] = self.dummy_sample __snake_case : int = 0.1 * sample __snake_case : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: __snake_case : Optional[Any] = self.get_scheduler_config(**__a ) __snake_case : Optional[Any] = scheduler_class(**__a ) scheduler.set_timesteps(__a ) # copy over dummy past residuals __snake_case : int = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a ) __snake_case : Dict = scheduler_class.from_pretrained(__a ) new_scheduler.set_timesteps(__a ) # copy over dummy past residuals __snake_case : str = dummy_past_residuals[: new_scheduler.config.solver_order] __snake_case , __snake_case : Any = sample, sample for t in range(__a , time_step + scheduler.config.solver_order + 1 ): __snake_case : List[str] = scheduler.step(__a , __a , __a , **__a ).prev_sample __snake_case : Optional[Any] = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' pass def A_ ( self : Tuple , __a : List[str]=0 , **__a : List[str] ) -> str: '''simple docstring''' __snake_case : List[str] = dict(self.forward_default_kwargs ) __snake_case : int = kwargs.pop('num_inference_steps' , __a ) __snake_case : Tuple = self.dummy_sample __snake_case : List[Any] = 0.1 * sample __snake_case : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] for scheduler_class in self.scheduler_classes: __snake_case : Optional[Any] = self.get_scheduler_config() __snake_case : Optional[Any] = scheduler_class(**__a ) scheduler.set_timesteps(__a ) # copy over dummy past residuals (must be after setting timesteps) __snake_case : List[Any] = dummy_past_residuals[: scheduler.config.solver_order] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(__a ) __snake_case : Optional[int] = scheduler_class.from_pretrained(__a ) # copy over dummy past residuals new_scheduler.set_timesteps(__a ) # copy over dummy past residual (must be after setting timesteps) __snake_case : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order] __snake_case : Any = scheduler.step(__a , __a , __a , **__a ).prev_sample __snake_case : str = new_scheduler.step(__a , __a , __a , **__a ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def A_ ( self : List[str] , __a : List[Any]=None , **__a : List[str] ) -> Any: '''simple docstring''' if scheduler is None: __snake_case : Any = self.scheduler_classes[0] __snake_case : Optional[int] = self.get_scheduler_config(**__a ) __snake_case : List[Any] = scheduler_class(**__a ) __snake_case : int = self.scheduler_classes[0] __snake_case : int = self.get_scheduler_config(**__a ) __snake_case : List[Any] = scheduler_class(**__a ) __snake_case : Any = 10 __snake_case : List[str] = self.dummy_model() __snake_case : List[Any] = self.dummy_sample_deter scheduler.set_timesteps(__a ) for i, t in enumerate(scheduler.timesteps ): __snake_case : str = model(__a , __a ) __snake_case : Optional[Any] = scheduler.step(__a , __a , __a ).prev_sample return sample def A_ ( self : Any ) -> Optional[Any]: '''simple docstring''' __snake_case : Tuple = dict(self.forward_default_kwargs ) __snake_case : List[Any] = kwargs.pop('num_inference_steps' , __a ) for scheduler_class in self.scheduler_classes: __snake_case : Any = self.get_scheduler_config() __snake_case : Tuple = scheduler_class(**__a ) __snake_case : Tuple = self.dummy_sample __snake_case : Tuple = 0.1 * sample if num_inference_steps is not None and hasattr(__a , 'set_timesteps' ): scheduler.set_timesteps(__a ) elif num_inference_steps is not None and not hasattr(__a , 'set_timesteps' ): __snake_case : Dict = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __snake_case : int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0] __snake_case : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order] __snake_case : Union[str, Any] = scheduler.timesteps[5] __snake_case : int = scheduler.timesteps[6] __snake_case : List[Any] = scheduler.step(__a , __a , __a , **__a ).prev_sample __snake_case : Tuple = scheduler.step(__a , __a , __a , **__a ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def A_ ( self : Optional[int] ) -> List[str]: '''simple docstring''' # make sure that iterating over schedulers with same config names gives same results # for defaults __snake_case : List[str] = DEISMultistepScheduler(**self.get_scheduler_config() ) __snake_case : List[str] = self.full_loop(scheduler=__a ) __snake_case : Optional[int] = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 __snake_case : str = DPMSolverSinglestepScheduler.from_config(scheduler.config ) __snake_case : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config ) __snake_case : Union[str, Any] = UniPCMultistepScheduler.from_config(scheduler.config ) __snake_case : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config ) __snake_case : List[Any] = self.full_loop(scheduler=__a ) __snake_case : int = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def A_ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' for timesteps in [25, 50, 100, 999, 1000]: self.check_over_configs(num_train_timesteps=__a ) def A_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' self.check_over_configs(thresholding=__a ) for order in [1, 2, 3]: for solver_type in ["logrho"]: for threshold in [0.5, 1.0, 2.0]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( thresholding=__a , prediction_type=__a , sample_max_value=__a , algorithm_type='deis' , solver_order=__a , solver_type=__a , ) def A_ ( self : List[str] ) -> Any: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' for algorithm_type in ["deis"]: for solver_type in ["logrho"]: for order in [1, 2, 3]: for prediction_type in ["epsilon", "sample"]: self.check_over_configs( solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , ) __snake_case : Union[str, Any] = self.full_loop( solver_order=__a , solver_type=__a , prediction_type=__a , algorithm_type=__a , ) assert not torch.isnan(__a ).any(), "Samples have nan numbers" def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' self.check_over_configs(lower_order_final=__a ) self.check_over_configs(lower_order_final=__a ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]: self.check_over_forward(num_inference_steps=__a , time_step=0 ) def A_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Any = self.full_loop() __snake_case : List[Any] = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.2_3_9_1_6 ) < 1e-3 def A_ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' __snake_case : int = self.full_loop(prediction_type='v_prediction' ) __snake_case : Any = torch.mean(torch.abs(__a ) ) assert abs(result_mean.item() - 0.0_9_1 ) < 1e-3 def A_ ( self : Tuple ) -> int: '''simple docstring''' __snake_case : int = self.scheduler_classes[0] __snake_case : Dict = self.get_scheduler_config(thresholding=__a , dynamic_thresholding_ratio=0 ) __snake_case : List[str] = scheduler_class(**__a ) __snake_case : Dict = 10 __snake_case : Optional[int] = self.dummy_model() __snake_case : Dict = self.dummy_sample_deter.half() scheduler.set_timesteps(__a ) for i, t in enumerate(scheduler.timesteps ): __snake_case : List[Any] = model(__a , __a ) __snake_case : List[str] = scheduler.step(__a , __a , __a ).prev_sample assert sample.dtype == torch.floataa
0
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging A__ : List[Any] = logging.get_logger(__name__) A__ : Tuple = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''t5''' A__ = ['''past_key_values'''] A__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , __a : Dict=32128 , __a : Dict=512 , __a : Union[str, Any]=64 , __a : str=2048 , __a : Union[str, Any]=6 , __a : Any=None , __a : Any=8 , __a : List[Any]=32 , __a : Any=128 , __a : Tuple=0.1 , __a : str=1e-6 , __a : Dict=1.0 , __a : Tuple="relu" , __a : Dict=True , __a : Union[str, Any]=True , __a : Any=0 , __a : Dict=1 , **__a : Union[str, Any] , ) -> Union[str, Any]: '''simple docstring''' __snake_case : int = vocab_size __snake_case : str = d_model __snake_case : str = d_kv __snake_case : List[Any] = d_ff __snake_case : List[str] = num_layers __snake_case : Tuple = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __snake_case : Union[str, Any] = num_heads __snake_case : Tuple = relative_attention_num_buckets __snake_case : Optional[int] = relative_attention_max_distance __snake_case : Optional[Any] = dropout_rate __snake_case : str = layer_norm_epsilon __snake_case : List[str] = initializer_factor __snake_case : int = feed_forward_proj __snake_case : Optional[Any] = use_cache __snake_case : Optional[Any] = self.feed_forward_proj.split('-' ) __snake_case : Dict = act_info[-1] __snake_case : List[str] = act_info[0] == 'gated' if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __snake_case : Dict = 'gelu_new' super().__init__( pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @property def A_ ( self : str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __snake_case : Union[str, Any] = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: __snake_case : Tuple = 'past_encoder_sequence + sequence' __snake_case : Dict = {0: 'batch'} __snake_case : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __snake_case : Tuple = {0: 'batch', 1: 'decoder_sequence'} __snake_case : int = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__a , direction='inputs' ) return common_inputs @property def A_ ( self : List[Any] ) -> int: '''simple docstring''' return 13
0
1
'''simple docstring''' import functools from typing import Any def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : list[str] ) -> bool: # Validation if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or len(_UpperCAmelCase ) == 0: raise ValueError('the string should be not empty string' ) if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or not all( isinstance(_UpperCAmelCase ,_UpperCAmelCase ) and len(_UpperCAmelCase ) > 0 for item in words ): raise ValueError('the words should be a list of non-empty strings' ) # Build trie __snake_case : dict[str, Any] = {} __snake_case : Any = 'WORD_KEEPER' for word in words: __snake_case : Optional[Any] = trie for c in word: if c not in trie_node: __snake_case : int = {} __snake_case : Any = trie_node[c] __snake_case : str = True __snake_case : Optional[int] = len(_UpperCAmelCase ) # Dynamic programming method @functools.cache def is_breakable(_UpperCAmelCase : int ) -> bool: if index == len_string: return True __snake_case : List[str] = trie for i in range(_UpperCAmelCase ,_UpperCAmelCase ): __snake_case : List[str] = trie_node.get(string[i] ,_UpperCAmelCase ) if trie_node is None: return False if trie_node.get(_UpperCAmelCase ,_UpperCAmelCase ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : Optional[int] = {} class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''llama''' A__ = ['''past_key_values'''] def __init__( self : Any , __a : List[str]=32000 , __a : Union[str, Any]=4096 , __a : Optional[Any]=11008 , __a : Any=32 , __a : str=32 , __a : Optional[int]=None , __a : Dict="silu" , __a : Dict=2048 , __a : List[str]=0.0_2 , __a : Union[str, Any]=1e-6 , __a : Dict=True , __a : List[str]=0 , __a : Tuple=1 , __a : Tuple=2 , __a : Optional[Any]=1 , __a : Any=False , __a : Tuple=None , **__a : List[Any] , ) -> Optional[int]: '''simple docstring''' __snake_case : str = vocab_size __snake_case : List[str] = max_position_embeddings __snake_case : List[Any] = hidden_size __snake_case : Union[str, Any] = intermediate_size __snake_case : Optional[int] = num_hidden_layers __snake_case : List[Any] = num_attention_heads # for backward compatibility if num_key_value_heads is None: __snake_case : Optional[int] = num_attention_heads __snake_case : Optional[Any] = num_key_value_heads __snake_case : int = hidden_act __snake_case : Any = initializer_range __snake_case : Any = rms_norm_eps __snake_case : Union[str, Any] = pretraining_tp __snake_case : Optional[int] = use_cache __snake_case : Any = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , ) def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' f'''got {self.rope_scaling}''' ) __snake_case : Optional[Any] = self.rope_scaling.get('type' , __a ) __snake_case : Tuple = self.rope_scaling.get('factor' , __a ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
0
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : Optional[int] = {} class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''llama''' A__ = ['''past_key_values'''] def __init__( self : Any , __a : List[str]=32000 , __a : Union[str, Any]=4096 , __a : Optional[Any]=11008 , __a : Any=32 , __a : str=32 , __a : Optional[int]=None , __a : Dict="silu" , __a : Dict=2048 , __a : List[str]=0.0_2 , __a : Union[str, Any]=1e-6 , __a : Dict=True , __a : List[str]=0 , __a : Tuple=1 , __a : Tuple=2 , __a : Optional[Any]=1 , __a : Any=False , __a : Tuple=None , **__a : List[Any] , ) -> Optional[int]: '''simple docstring''' __snake_case : str = vocab_size __snake_case : List[str] = max_position_embeddings __snake_case : List[Any] = hidden_size __snake_case : Union[str, Any] = intermediate_size __snake_case : Optional[int] = num_hidden_layers __snake_case : List[Any] = num_attention_heads # for backward compatibility if num_key_value_heads is None: __snake_case : Optional[int] = num_attention_heads __snake_case : Optional[Any] = num_key_value_heads __snake_case : int = hidden_act __snake_case : Any = initializer_range __snake_case : Any = rms_norm_eps __snake_case : Union[str, Any] = pretraining_tp __snake_case : Optional[int] = use_cache __snake_case : Any = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , ) def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' f'''got {self.rope_scaling}''' ) __snake_case : Optional[Any] = self.rope_scaling.get('type' , __a ) __snake_case : Tuple = self.rope_scaling.get('factor' , __a ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
0
'''simple docstring''' from __future__ import annotations A__ : str = '''Muhammad Umer Farooq''' A__ : int = '''MIT''' A__ : Optional[int] = '''1.0.0''' A__ : List[Any] = '''Muhammad Umer Farooq''' A__ : Optional[Any] = '''[email protected]''' A__ : Optional[Any] = '''Alpha''' import re from html.parser import HTMLParser from urllib import parse import requests class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Union[str, Any] , __a : str ) -> None: '''simple docstring''' super().__init__() __snake_case : list[str] = [] __snake_case : Dict = domain def A_ ( self : Dict , __a : str , __a : list[tuple[str, str | None]] ) -> None: '''simple docstring''' # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: __snake_case : Optional[Any] = parse.urljoin(self.domain , __a ) self.urls.append(__a ) def a_ ( _UpperCAmelCase : str ) -> str: return ".".join(get_sub_domain_name(_UpperCAmelCase ).split('.' )[-2:] ) def a_ ( _UpperCAmelCase : str ) -> str: return parse.urlparse(_UpperCAmelCase ).netloc def a_ ( _UpperCAmelCase : str = "https://github.com" ) -> list[str]: __snake_case : List[Any] = get_domain_name(_UpperCAmelCase ) # Initialize the parser __snake_case : Tuple = Parser(_UpperCAmelCase ) try: # Open URL __snake_case : Any = requests.get(_UpperCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through __snake_case : Dict = set() for link in parser.urls: # open URL. # read = requests.get(link) try: __snake_case : List[Any] = requests.get(_UpperCAmelCase ) # Get the valid email. __snake_case : Optional[Any] = re.findall('[a-zA-Z0-9]+@' + domain ,read.text ) # If not in list then append it. for email in emails: valid_emails.add(_UpperCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(_UpperCAmelCase ) if __name__ == "__main__": A__ : Tuple = emails_from_url('''https://github.com''') print(F"""{len(emails)} emails found:""") print('''\n'''.join(sorted(emails)))
0
1
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = AutoencoderKL A__ = '''sample''' A__ = 1E-2 @property def A_ ( self : List[str] ) -> Dict: '''simple docstring''' __snake_case : List[str] = 4 __snake_case : int = 3 __snake_case : Dict = (32, 32) __snake_case : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(__a ) return {"sample": image} @property def A_ ( self : Dict ) -> Tuple: '''simple docstring''' return (3, 32, 32) @property def A_ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' return (3, 32, 32) def A_ ( self : Optional[int] ) -> Dict: '''simple docstring''' __snake_case : Tuple = { 'block_out_channels': [32, 64], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 4, } __snake_case : Union[str, Any] = self.dummy_input return init_dict, inputs_dict def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' pass def A_ ( self : Tuple ) -> List[Any]: '''simple docstring''' pass @unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' ) def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' # enable deterministic behavior for gradient checkpointing __snake_case , __snake_case : Any = self.prepare_init_args_and_inputs_for_common() __snake_case : Union[str, Any] = self.model_class(**__a ) model.to(__a ) assert not model.is_gradient_checkpointing and model.training __snake_case : Union[str, Any] = model(**__a ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() __snake_case : List[str] = torch.randn_like(__a ) __snake_case : List[Any] = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing __snake_case : Optional[Any] = self.model_class(**__a ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__a ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training __snake_case : Tuple = model_a(**__a ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() __snake_case : Optional[int] = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1e-5 ) __snake_case : int = dict(model.named_parameters() ) __snake_case : int = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) ) def A_ ( self : List[str] ) -> Dict: '''simple docstring''' __snake_case , __snake_case : int = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=__a ) self.assertIsNotNone(__a ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(__a ) __snake_case : Optional[Any] = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def A_ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' __snake_case : Any = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' ) __snake_case : Tuple = model.to(__a ) model.eval() if torch_device == "mps": __snake_case : str = torch.manual_seed(0 ) else: __snake_case : List[Any] = torch.Generator(device=__a ).manual_seed(0 ) __snake_case : str = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) __snake_case : Union[str, Any] = image.to(__a ) with torch.no_grad(): __snake_case : Any = model(__a , sample_posterior=__a , generator=__a ).sample __snake_case : List[Any] = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": __snake_case : Dict = torch.tensor( [ -4.00_78e-01, -3.83_23e-04, -1.26_81e-01, -1.14_62e-01, 2.00_95e-01, 1.08_93e-01, -8.82_47e-02, -3.03_61e-01, -9.86_44e-03, ] ) elif torch_device == "cpu": __snake_case : Optional[int] = torch.tensor( [-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] ) else: __snake_case : Optional[int] = torch.tensor( [-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] ) self.assertTrue(torch_all_close(__a , __a , rtol=1e-2 ) ) @slow class snake_case__ ( unittest.TestCase ): def A_ ( self : Dict , __a : Tuple , __a : Optional[Any] ) -> Any: '''simple docstring''' return f'''gaussian_noise_s={seed}_shape={"_".join([str(__a ) for s in shape] )}.npy''' def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : Optional[int] , __a : List[str]=0 , __a : str=(4, 3, 512, 512) , __a : List[str]=False ) -> List[Any]: '''simple docstring''' __snake_case : str = torch.floataa if fpaa else torch.floataa __snake_case : Optional[int] = torch.from_numpy(load_hf_numpy(self.get_file_format(__a , __a ) ) ).to(__a ).to(__a ) return image def A_ ( self : Union[str, Any] , __a : Union[str, Any]="CompVis/stable-diffusion-v1-4" , __a : str=False ) -> Tuple: '''simple docstring''' __snake_case : Any = 'fp16' if fpaa else None __snake_case : Dict = torch.floataa if fpaa else torch.floataa __snake_case : int = AutoencoderKL.from_pretrained( __a , subfolder='vae' , torch_dtype=__a , revision=__a , ) model.to(__a ).eval() return model def A_ ( self : Dict , __a : Optional[int]=0 ) -> Dict: '''simple docstring''' if torch_device == "mps": return torch.manual_seed(__a ) return torch.Generator(device=__a ).manual_seed(__a ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def A_ ( self : str , __a : Any , __a : int , __a : Tuple ) -> Any: '''simple docstring''' __snake_case : Tuple = self.get_sd_vae_model() __snake_case : int = self.get_sd_image(__a ) __snake_case : Optional[int] = self.get_generator(__a ) with torch.no_grad(): __snake_case : List[str] = model(__a , generator=__a , sample_posterior=__a ).sample assert sample.shape == image.shape __snake_case : Union[str, Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() __snake_case : Dict = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice ) assert torch_all_close(__a , __a , atol=3e-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]], [47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]], # fmt: on ] ) @require_torch_gpu def A_ ( self : Union[str, Any] , __a : Tuple , __a : Union[str, Any] ) -> Any: '''simple docstring''' __snake_case : List[Any] = self.get_sd_vae_model(fpaa=__a ) __snake_case : List[str] = self.get_sd_image(__a , fpaa=__a ) __snake_case : List[str] = self.get_generator(__a ) with torch.no_grad(): __snake_case : Any = model(__a , generator=__a , sample_posterior=__a ).sample assert sample.shape == image.shape __snake_case : Optional[int] = sample[-1, -2:, :2, -2:].flatten().float().cpu() __snake_case : Optional[Any] = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]], [47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]], # fmt: on ] ) def A_ ( self : Optional[int] , __a : Any , __a : Optional[int] , __a : List[Any] ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = self.get_sd_vae_model() __snake_case : Union[str, Any] = self.get_sd_image(__a ) with torch.no_grad(): __snake_case : int = model(__a ).sample assert sample.shape == image.shape __snake_case : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() __snake_case : Any = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice ) assert torch_all_close(__a , __a , atol=3e-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]], [37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]], # fmt: on ] ) @require_torch_gpu def A_ ( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Union[str, Any] = self.get_sd_vae_model() __snake_case : Any = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): __snake_case : Optional[Any] = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] __snake_case : Tuple = sample[-1, -2:, :2, -2:].flatten().cpu() __snake_case : Optional[Any] = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=1e-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]], [16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]], # fmt: on ] ) @require_torch_gpu def A_ ( self : Dict , __a : Optional[int] , __a : int ) -> Tuple: '''simple docstring''' __snake_case : int = self.get_sd_vae_model(fpaa=__a ) __snake_case : Tuple = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): __snake_case : List[Any] = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] __snake_case : int = sample[-1, -2:, :2, -2:].flatten().float().cpu() __snake_case : List[Any] = torch.tensor(__a ) assert torch_all_close(__a , __a , atol=5e-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' ) def A_ ( self : int , __a : Optional[int] ) -> str: '''simple docstring''' __snake_case : Optional[Any] = self.get_sd_vae_model(fpaa=__a ) __snake_case : Optional[int] = self.get_sd_image(__a , shape=(3, 4, 64, 64) , fpaa=__a ) with torch.no_grad(): __snake_case : List[Any] = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __snake_case : Optional[Any] = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1e-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' ) def A_ ( self : int , __a : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Any = self.get_sd_vae_model() __snake_case : Tuple = self.get_sd_image(__a , shape=(3, 4, 64, 64) ) with torch.no_grad(): __snake_case : Dict = model.decode(__a ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): __snake_case : List[Any] = model.decode(__a ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__a , __a , atol=1e-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]], [47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]], # fmt: on ] ) def A_ ( self : Dict , __a : List[str] , __a : List[str] ) -> int: '''simple docstring''' __snake_case : List[Any] = self.get_sd_vae_model() __snake_case : Any = self.get_sd_image(__a ) __snake_case : Optional[int] = self.get_generator(__a ) with torch.no_grad(): __snake_case : Union[str, Any] = model.encode(__a ).latent_dist __snake_case : Any = dist.sample(generator=__a ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] __snake_case : Tuple = sample[0, -1, -3:, -3:].flatten().cpu() __snake_case : Optional[int] = torch.tensor(__a ) __snake_case : Optional[int] = 3e-3 if torch_device != 'mps' else 1e-2 assert torch_all_close(__a , __a , atol=__a )
0
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) A__ : Dict = logging.getLogger() def a_ ( ) -> Tuple: __snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument('-f' ) __snake_case : Any = parser.parse_args() return args.f def a_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]: __snake_case : Tuple = {} __snake_case : Union[str, Any] = os.path.join(_UpperCAmelCase ,'all_results.json' ) if os.path.exists(_UpperCAmelCase ): with open(_UpperCAmelCase ,'r' ) as f: __snake_case : List[str] = json.load(_UpperCAmelCase ) else: raise ValueError(f'''can\'t find {path}''' ) return results def a_ ( ) -> Union[str, Any]: __snake_case : Union[str, Any] = torch.cuda.is_available() and torch_device == 'cuda' return is_using_cuda and is_apex_available() A__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @classmethod def A_ ( cls : Any ) -> List[str]: '''simple docstring''' # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __snake_case : Optional[int] = tempfile.mkdtemp() __snake_case : Dict = os.path.join(cls.tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) __snake_case : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def A_ ( cls : List[str] ) -> List[str]: '''simple docstring''' shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : List[Any] = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'glue_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertLess(result['perplexity'] , 100 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'clm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : int = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertLess(result['perplexity'] , 42 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'mlm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __snake_case : Any = 7 if get_gpu_count() > 1 else 2 __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertLess(result['train_loss'] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'ner_no_trainer' ) ) ) @unittest.skip(reason='Fix me @muellerzr' ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : Tuple = f''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['eval_f1'] , 28 ) self.assertGreaterEqual(result['eval_exact'] , 28 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'qa_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : str = self.get_auto_remove_tmp_dir() __snake_case : Any = f''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__a , 'swag_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : int = get_results(__a ) self.assertGreaterEqual(result['eval_rouge1'] , 10 ) self.assertGreaterEqual(result['eval_rouge2'] , 2 ) self.assertGreaterEqual(result['eval_rougeL'] , 7 ) self.assertGreaterEqual(result['eval_rougeLsum'] , 7 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_bleu'] , 30 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) ) @slow def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) __snake_case : List[str] = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Dict = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : Optional[int] = get_results(__a ) # The base model scores a 25% self.assertGreaterEqual(result['eval_accuracy'] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
0
1
'''simple docstring''' import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class snake_case__ ( unittest.TestCase ): A__ = JukeboxTokenizer A__ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def A_ ( self : Any ) -> Dict: '''simple docstring''' import torch __snake_case : List[Any] = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' ) __snake_case : Optional[Any] = tokenizer(**self.metas )['input_ids'] # fmt: off __snake_case : List[str] = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' import torch __snake_case : List[str] = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' ) __snake_case : str = tokenizer(**self.metas )['input_ids'] # fmt: off __snake_case : Optional[Any] = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
0
'''simple docstring''' import math def a_ ( _UpperCAmelCase : int ) -> list: __snake_case : Optional[Any] = [True] * n __snake_case : Optional[int] = False __snake_case : Dict = False __snake_case : List[Any] = True for i in range(3 ,int(n**0.5 + 1 ) ,2 ): __snake_case : Optional[int] = i * 2 while index < n: __snake_case : Union[str, Any] = False __snake_case : int = index + i __snake_case : Dict = [2] for i in range(3 ,_UpperCAmelCase ,2 ): if is_prime[i]: primes.append(_UpperCAmelCase ) return primes def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int: __snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00 __snake_case : Tuple = prime_sieve(_UpperCAmelCase ) __snake_case : List[Any] = 0 __snake_case : List[Any] = 0 __snake_case : Optional[int] = primes[prime_index] while (last_prime**2) <= limit: __snake_case : Optional[int] = primes[prime_index + 1] __snake_case : Union[str, Any] = last_prime**2 __snake_case : Dict = next_prime**2 # Get numbers divisible by lps(current) __snake_case : Optional[Any] = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) __snake_case : Optional[Any] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps __snake_case : List[str] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair __snake_case : Dict = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
0
1
'''simple docstring''' from datetime import datetime as dt import os from github import Github A__ : Union[str, Any] = [ '''good first issue''', '''good second issue''', '''good difficult issue''', '''feature request''', '''new model''', '''wip''', ] def a_ ( ) -> Optional[Any]: __snake_case : int = Github(os.environ['GITHUB_TOKEN'] ) __snake_case : str = g.get_repo('huggingface/transformers' ) __snake_case : Any = repo.get_issues(state='open' ) for issue in open_issues: __snake_case : int = sorted([comment for comment in issue.get_comments()] ,key=lambda _UpperCAmelCase : i.created_at ,reverse=_UpperCAmelCase ) __snake_case : Optional[int] = comments[0] if len(_UpperCAmelCase ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") issue.edit(state='closed' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # print(f"Would add stale comment to {issue.number}") issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) if __name__ == "__main__": main()
0
'''simple docstring''' def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float: return price * (1 + tax_rate) if __name__ == "__main__": print(F"""{price_plus_tax(1_0_0, 0.25) = }""") print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
0
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : List[str] = logging.get_logger(__name__) A__ : Tuple = { '''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''', } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''switch_transformers''' A__ = ['''past_key_values'''] A__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Any , __a : Dict=32128 , __a : List[str]=768 , __a : Any=64 , __a : Tuple=2048 , __a : int=64 , __a : Union[str, Any]=12 , __a : Dict=3 , __a : List[Any]=12 , __a : Optional[Any]=3 , __a : Any=12 , __a : List[Any]=8 , __a : int=False , __a : Optional[int]=0.0_1 , __a : Optional[Any]="float32" , __a : Union[str, Any]=False , __a : Dict=32 , __a : Optional[Any]=128 , __a : Dict=0.1 , __a : Any=1e-6 , __a : Union[str, Any]=0.0_0_1 , __a : List[Any]=0.0_0_1 , __a : Tuple=1.0 , __a : Optional[Any]="relu" , __a : Union[str, Any]=True , __a : Dict=False , __a : Union[str, Any]=True , __a : Any=0 , __a : Optional[int]=1 , **__a : str , ) -> Dict: '''simple docstring''' __snake_case : Tuple = vocab_size __snake_case : List[str] = d_model __snake_case : List[Any] = d_kv __snake_case : str = d_ff __snake_case : str = num_sparse_encoder_layers __snake_case : Any = num_layers __snake_case : List[str] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __snake_case : Any = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: __snake_case : List[str] = self.num_layers // self.num_sparse_encoder_layers else: __snake_case : List[Any] = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_decoder_layers > 0: __snake_case : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: __snake_case : Optional[Any] = self.num_decoder_layers # HACK: this will create 0 sparse layers __snake_case : Tuple = num_heads __snake_case : int = num_experts __snake_case : Tuple = expert_capacity __snake_case : List[str] = router_bias __snake_case : Tuple = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' ) __snake_case : Optional[Any] = router_dtype __snake_case : Dict = router_ignore_padding_tokens __snake_case : Dict = relative_attention_num_buckets __snake_case : Union[str, Any] = relative_attention_max_distance __snake_case : Dict = dropout_rate __snake_case : int = layer_norm_epsilon __snake_case : str = initializer_factor __snake_case : Optional[int] = feed_forward_proj __snake_case : List[str] = use_cache __snake_case : List[str] = add_router_probs __snake_case : Optional[int] = router_z_loss_coef __snake_case : Optional[int] = router_aux_loss_coef __snake_case : List[Any] = self.feed_forward_proj.split('-' ) __snake_case : str = act_info[-1] __snake_case : Optional[Any] = act_info[0] == 'gated' if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __snake_case : int = 'gelu_new' super().__init__( pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , )
0
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : List[Any] ) -> int: '''simple docstring''' __snake_case : Optional[int] = SMALL_MODEL_IDENTIFIER __snake_case : str = 'pt' __snake_case : Union[str, Any] = 'tf' def A_ ( self : Dict , __a : Tuple ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(__a ) def A_ ( self : Any , __a : Optional[Any] ) -> Dict: '''simple docstring''' __snake_case : Union[str, Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__a ) model_tf.save_pretrained(__a ) def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Tuple = 'mock_framework' # Framework provided - return whatever the user provides __snake_case : int = FeaturesManager.determine_framework(self.test_model , __a ) self.assertEqual(__a , __a ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__a ) __snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a ) self.assertEqual(__a , __a ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__a ) __snake_case : Tuple = FeaturesManager.determine_framework(__a , __a ) self.assertEqual(__a , __a ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__a ) __snake_case : Tuple = FeaturesManager.determine_framework(__a ) self.assertEqual(__a , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__a ) __snake_case : Union[str, Any] = FeaturesManager.determine_framework(__a ) self.assertEqual(__a , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(__a ): __snake_case : Optional[int] = FeaturesManager.determine_framework(__a ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Union[str, Any] = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ): __snake_case : int = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_pt ) # PyTorch not in environment -> use TensorFlow __snake_case : Tuple = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_torch_available' , __a ): __snake_case : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_tf ) # Both in environment -> use PyTorch __snake_case : Optional[Any] = MagicMock(return_value=__a ) __snake_case : Tuple = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ), patch( 'transformers.onnx.features.is_torch_available' , __a ): __snake_case : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_pt ) # Both not in environment -> raise error __snake_case : str = MagicMock(return_value=__a ) __snake_case : List[Any] = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ), patch( 'transformers.onnx.features.is_torch_available' , __a ): with self.assertRaises(__a ): __snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
0
1
'''simple docstring''' import os from collections import deque import torch from torch.utils.data import Dataset class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : List[str] , __a : Tuple="" , __a : Any="train" ) -> Optional[Any]: '''simple docstring''' assert os.path.isdir(__a ) __snake_case : str = [] __snake_case : List[str] = os.listdir(__a ) for story_filename in story_filenames_list: if "summary" in story_filename: continue __snake_case : str = os.path.join(__a , __a ) if not os.path.isfile(__a ): continue self.documents.append(__a ) def __len__( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' return len(self.documents ) def __getitem__( self : Tuple , __a : Tuple ) -> int: '''simple docstring''' __snake_case : Optional[Any] = self.documents[idx] __snake_case : Any = document_path.split('/' )[-1] with open(__a , encoding='utf-8' ) as source: __snake_case : List[str] = source.read() __snake_case , __snake_case : Any = process_story(__a ) return document_name, story_lines, summary_lines def a_ ( _UpperCAmelCase : Optional[int] ) -> Tuple: __snake_case : Union[str, Any] = list(filter(lambda _UpperCAmelCase : len(_UpperCAmelCase ) != 0 ,[line.strip() for line in raw_story.split('\n' )] ) ) # for some unknown reason some lines miss a period, add it __snake_case : Tuple = [_add_missing_period(_UpperCAmelCase ) for line in nonempty_lines] # gather article lines __snake_case : Optional[Any] = [] __snake_case : str = deque(_UpperCAmelCase ) while True: try: __snake_case : str = lines.popleft() if element.startswith('@highlight' ): break story_lines.append(_UpperCAmelCase ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines __snake_case : Optional[Any] = list(filter(lambda _UpperCAmelCase : not t.startswith('@highlight' ) ,_UpperCAmelCase ) ) return story_lines, summary_lines def a_ ( _UpperCAmelCase : Union[str, Any] ) -> int: __snake_case : List[Any] = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')'] if line.startswith('@highlight' ): return line if line[-1] in END_TOKENS: return line return line + "." def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ) -> int: if len(_UpperCAmelCase ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(_UpperCAmelCase )) ) return sequence def a_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str] ) -> List[str]: __snake_case : Optional[Any] = torch.ones_like(_UpperCAmelCase ) __snake_case : List[Any] = sequence == pad_token_id __snake_case : Tuple = 0 return mask def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[Any] ) -> Dict: __snake_case : Optional[int] = [tokenizer.encode(_UpperCAmelCase ) for line in story_lines] __snake_case : Union[str, Any] = [token for sentence in story_lines_token_ids for token in sentence] __snake_case : Any = [tokenizer.encode(_UpperCAmelCase ) for line in summary_lines] __snake_case : str = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : str ) -> Tuple: __snake_case : int = [] for sequence in batch: __snake_case : Union[str, Any] = -1 __snake_case : List[Any] = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(_UpperCAmelCase ) return torch.tensor(_UpperCAmelCase )
0
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ProphetNetTokenizer A__ = False def A_ ( self : Optional[int] ) -> Dict: '''simple docstring''' super().setUp() __snake_case : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : int , __a : Union[str, Any] ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = 'UNwant\u00E9d,running' __snake_case : List[str] = 'unwanted, running' return input_text, output_text def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Dict = self.tokenizer_class(self.vocab_file ) __snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def A_ ( self : int ) -> Any: '''simple docstring''' __snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Any ) -> List[str]: '''simple docstring''' __snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Optional[int] ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def A_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' __snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __snake_case : List[Any] = {} for i, token in enumerate(__a ): __snake_case : List[str] = i __snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def A_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' __snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] __snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' ) self.assertIsInstance(__a , __a ) __snake_case : int = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__a , __a ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def A_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a ) __snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a ) __snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a ) __snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
0
1
'''simple docstring''' import math def a_ ( _UpperCAmelCase : int ) -> bool: return math.sqrt(_UpperCAmelCase ) * math.sqrt(_UpperCAmelCase ) == num def a_ ( _UpperCAmelCase : int ) -> bool: __snake_case : str = 0 __snake_case : str = n while left <= right: __snake_case : Optional[int] = (left + right) // 2 if mid**2 == n: return True elif mid**2 > n: __snake_case : int = mid - 1 else: __snake_case : int = mid + 1 return False if __name__ == "__main__": import doctest doctest.testmod()
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : Optional[Any] = { '''configuration_nllb_moe''': [ '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NllbMoeConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NllbMoeForConditionalGeneration''', '''NllbMoeModel''', '''NllbMoePreTrainedModel''', '''NllbMoeTop2Router''', '''NllbMoeSparseMLP''', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> int: if divisor % 5 == 0 or divisor % 2 == 0: return 0 __snake_case : Dict = 1 __snake_case : List[str] = 1 while repunit: __snake_case : Dict = (10 * repunit + 1) % divisor repunit_index += 1 return repunit_index def a_ ( _UpperCAmelCase : int = 1_00_00_00 ) -> int: __snake_case : Optional[int] = limit - 1 if divisor % 2 == 0: divisor += 1 while least_divisible_repunit(_UpperCAmelCase ) <= limit: divisor += 2 return divisor if __name__ == "__main__": print(F"""{solution() = }""")
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('The given input must be positive' ) # get the generated string sequence __snake_case : Optional[Any] = gray_code_sequence_string(_UpperCAmelCase ) # # convert them to integers for i in range(len(_UpperCAmelCase ) ): __snake_case : Optional[Any] = int(sequence[i] ,2 ) return sequence def a_ ( _UpperCAmelCase : int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __snake_case : Dict = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __snake_case : Dict = gray_code_sequence_string(bit_count - 1 ) __snake_case : Any = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __snake_case : str = '0' + smaller_sequence[i] sequence.append(_UpperCAmelCase ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __snake_case : Any = '1' + smaller_sequence[i] sequence.append(_UpperCAmelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
0
1
'''simple docstring''' from __future__ import annotations import math def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : bool ,_UpperCAmelCase : list[int] ,_UpperCAmelCase : float ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if not scores: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 ,node_index * 2 ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ,minimax(depth + 1 ,node_index * 2 + 1 ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ,) if is_max else min( minimax(depth + 1 ,node_index * 2 ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ,minimax(depth + 1 ,node_index * 2 + 1 ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ,) ) def a_ ( ) -> None: __snake_case : Dict = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23] __snake_case : Union[str, Any] = math.log(len(_UpperCAmelCase ) ,2 ) print(f'''Optimal value : {minimax(0 ,0 ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )}''' ) if __name__ == "__main__": import doctest doctest.testmod() main()
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class snake_case__ ( unittest.TestCase ): def A_ ( self : int ) -> List[Any]: '''simple docstring''' __snake_case : Any = tempfile.mkdtemp() # fmt: off __snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) __snake_case : List[str] = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } __snake_case : Optional[Any] = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__a , __a ) def A_ ( self : Optional[int] , **__a : Dict ) -> int: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : int , **__a : Dict ) -> Tuple: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __snake_case : List[str] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : Dict = self.get_image_processor() __snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) processor.save_pretrained(self.tmpdirname ) __snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : Optional[Any] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 ) __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = self.prepare_image_inputs() __snake_case : List[str] = image_processor(__a , return_tensors='np' ) __snake_case : List[str] = processor(images=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Optional[int] = 'lower newer' __snake_case : Dict = processor(text=__a ) __snake_case : List[Any] = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : List[Any] = 'lower newer' __snake_case : Optional[Any] = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(__a ): processor() def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Union[str, Any] = self.get_image_processor() __snake_case : Any = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __snake_case : int = processor.batch_decode(__a ) __snake_case : Optional[Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def A_ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = self.get_image_processor() __snake_case : Dict = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Union[str, Any] = 'lower newer' __snake_case : Tuple = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
0
1
'''simple docstring''' import copy import unittest from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, LayoutLMvaConfig, LayoutLMvaForQuestionAnswering, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaModel, ) from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class snake_case__ : def __init__( self : int , __a : Tuple , __a : List[Any]=2 , __a : Union[str, Any]=3 , __a : Any=4 , __a : List[str]=2 , __a : int=7 , __a : List[str]=True , __a : Optional[int]=True , __a : Union[str, Any]=True , __a : Dict=True , __a : int=99 , __a : Union[str, Any]=36 , __a : Dict=3 , __a : Any=4 , __a : int=37 , __a : Tuple="gelu" , __a : Optional[Any]=0.1 , __a : str=0.1 , __a : Union[str, Any]=512 , __a : Union[str, Any]=16 , __a : str=2 , __a : List[Any]=0.0_2 , __a : Union[str, Any]=6 , __a : int=6 , __a : Any=3 , __a : Optional[int]=4 , __a : Union[str, Any]=None , __a : List[str]=1000 , ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = parent __snake_case : Any = batch_size __snake_case : str = num_channels __snake_case : List[Any] = image_size __snake_case : Dict = patch_size __snake_case : Optional[Any] = text_seq_length __snake_case : str = is_training __snake_case : str = use_input_mask __snake_case : int = use_token_type_ids __snake_case : Dict = use_labels __snake_case : Union[str, Any] = vocab_size __snake_case : Union[str, Any] = hidden_size __snake_case : Any = num_hidden_layers __snake_case : List[str] = num_attention_heads __snake_case : Dict = intermediate_size __snake_case : Dict = hidden_act __snake_case : Tuple = hidden_dropout_prob __snake_case : List[str] = attention_probs_dropout_prob __snake_case : int = max_position_embeddings __snake_case : Dict = type_vocab_size __snake_case : Optional[Any] = type_sequence_label_size __snake_case : str = initializer_range __snake_case : Optional[int] = coordinate_size __snake_case : int = shape_size __snake_case : Any = num_labels __snake_case : Any = num_choices __snake_case : List[str] = scope __snake_case : List[Any] = range_bbox # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token) __snake_case : str = text_seq_length __snake_case : str = (image_size // patch_size) ** 2 + 1 __snake_case : int = self.text_seq_length + self.image_seq_length def A_ ( self : Optional[int] ) -> str: '''simple docstring''' __snake_case : int = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size ) __snake_case : int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __snake_case : str = bbox[i, j, 3] __snake_case : Dict = bbox[i, j, 1] __snake_case : Union[str, Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: __snake_case : Any = bbox[i, j, 2] __snake_case : Dict = bbox[i, j, 0] __snake_case : Any = t __snake_case : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Optional[int] = None if self.use_input_mask: __snake_case : Tuple = random_attention_mask([self.batch_size, self.text_seq_length] ) __snake_case : Tuple = None if self.use_token_type_ids: __snake_case : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size ) __snake_case : List[Any] = None __snake_case : Optional[int] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels ) __snake_case : Optional[int] = LayoutLMvaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , ) return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels def A_ ( self : List[Any] , __a : List[str] , __a : str , __a : int , __a : Optional[Any] , __a : str , __a : Optional[int] , __a : str , __a : Dict ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = LayoutLMvaModel(config=__a ) model.to(__a ) model.eval() # text + image __snake_case : List[str] = model(__a , pixel_values=__a ) __snake_case : Optional[int] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a ) __snake_case : List[str] = model(__a , bbox=__a , pixel_values=__a , token_type_ids=__a ) __snake_case : Tuple = model(__a , bbox=__a , pixel_values=__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # text only __snake_case : str = model(__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) ) # image only __snake_case : Tuple = model(pixel_values=__a ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) ) def A_ ( self : Dict , __a : Optional[Any] , __a : Optional[Any] , __a : int , __a : int , __a : List[str] , __a : List[str] , __a : Tuple , __a : List[str] ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = self.num_labels __snake_case : Union[str, Any] = LayoutLMvaForSequenceClassification(__a ) model.to(__a ) model.eval() __snake_case : List[str] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A_ ( self : int , __a : Optional[int] , __a : Dict , __a : Union[str, Any] , __a : Union[str, Any] , __a : Any , __a : List[Any] , __a : Optional[Any] , __a : Any ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[Any] = self.num_labels __snake_case : Optional[Any] = LayoutLMvaForTokenClassification(config=__a ) model.to(__a ) model.eval() __snake_case : Tuple = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , labels=__a , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) ) def A_ ( self : Optional[Any] , __a : Dict , __a : Any , __a : List[Any] , __a : Optional[Any] , __a : Union[str, Any] , __a : Any , __a : str , __a : Tuple ) -> Tuple: '''simple docstring''' __snake_case : Any = LayoutLMvaForQuestionAnswering(config=__a ) model.to(__a ) model.eval() __snake_case : Optional[Any] = model( __a , bbox=__a , pixel_values=__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A_ ( self : Dict ) -> Dict: '''simple docstring''' __snake_case : Any = self.prepare_config_and_inputs() ( ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ( __snake_case ) , ) : Optional[Any] = config_and_inputs __snake_case : Dict = { 'input_ids': input_ids, 'bbox': bbox, 'pixel_values': pixel_values, 'token_type_ids': token_type_ids, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = False A__ = False A__ = False A__ = ( ( LayoutLMvaModel, LayoutLMvaForSequenceClassification, LayoutLMvaForTokenClassification, LayoutLMvaForQuestionAnswering, ) if is_torch_available() else () ) A__ = ( {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel} if is_torch_available() else {} ) def A_ ( self : Dict , __a : List[str] , __a : List[str] , __a : Optional[Any] , __a : Union[str, Any] , __a : Optional[Any] ) -> int: '''simple docstring''' # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has # the sequence dimension of the text embedding only. # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`) return True def A_ ( self : Any ) -> str: '''simple docstring''' __snake_case : Optional[Any] = LayoutLMvaModelTester(self ) __snake_case : str = ConfigTester(self , config_class=__a , hidden_size=37 ) def A_ ( self : Dict , __a : Dict , __a : int , __a : Tuple=False ) -> Union[str, Any]: '''simple docstring''' __snake_case : int = copy.deepcopy(__a ) if model_class in get_values(__a ): __snake_case : Union[str, Any] = { k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous() if isinstance(__a , torch.Tensor ) and v.ndim > 1 else v for k, v in inputs_dict.items() } if return_labels: if model_class in get_values(__a ): __snake_case : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__a ) elif model_class in get_values(__a ): __snake_case : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__a ) __snake_case : Tuple = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__a ) elif model_class in [ *get_values(__a ), ]: __snake_case : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__a ) elif model_class in [ *get_values(__a ), ]: __snake_case : Union[str, Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__a , ) return inputs_dict def A_ ( self : Tuple ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def A_ ( self : List[str] ) -> str: '''simple docstring''' __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def A_ ( self : str ) -> Any: '''simple docstring''' __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : int = type self.model_tester.create_and_check_model(*__a ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' __snake_case : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__a ) def A_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) def A_ ( self : List[str] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__a ) @slow def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Optional[int] = LayoutLMvaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a_ ( ) -> str: __snake_case : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch class snake_case__ ( unittest.TestCase ): @cached_property def A_ ( self : Any ) -> Dict: '''simple docstring''' return LayoutLMvaImageProcessor(apply_ocr=__a ) if is_vision_available() else None @slow def A_ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[Any] = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' ).to(__a ) __snake_case : int = self.default_image_processor __snake_case : Tuple = prepare_img() __snake_case : Tuple = image_processor(images=__a , return_tensors='pt' ).pixel_values.to(__a ) __snake_case : Optional[int] = torch.tensor([[1, 2]] ) __snake_case : int = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 ) # forward pass __snake_case : Union[str, Any] = model( input_ids=input_ids.to(__a ) , bbox=bbox.to(__a ) , pixel_values=pixel_values.to(__a ) , ) # verify the logits __snake_case : List[str] = torch.Size((1, 199, 768) ) self.assertEqual(outputs.last_hidden_state.shape , __a ) __snake_case : int = torch.tensor( [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(__a ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1e-4 ) )
0
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def a_ ( _UpperCAmelCase : List[Any] ) -> Tuple: __snake_case : str = [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ) -> List[str]: __snake_case : Tuple = [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def a_ ( _UpperCAmelCase : Union[str, Any] ) -> Dict: __snake_case : Union[str, Any] = [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def a_ ( ) -> Optional[Any]: __snake_case : Any = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ) -> Tuple: __snake_case : List[str] = 'imagenet-1k-id2label.json' __snake_case : Dict = 10_00 __snake_case : Union[str, Any] = 'huggingface/label-files' __snake_case : str = num_labels __snake_case : str = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ) ,'r' ) ) __snake_case : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __snake_case : Optional[Any] = idalabel __snake_case : str = {v: k for k, v in idalabel.items()} __snake_case : Dict = CvtConfig(num_labels=_UpperCAmelCase ,idalabel=_UpperCAmelCase ,labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' ,1 )[-1][4:6] == "13": __snake_case : Tuple = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' ,1 )[-1][4:6] == "21": __snake_case : str = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __snake_case : Dict = [2, 2, 20] __snake_case : Any = [3, 12, 16] __snake_case : Tuple = [1_92, 7_68, 10_24] __snake_case : str = CvtForImageClassification(_UpperCAmelCase ) __snake_case : List[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) __snake_case : int = image_size __snake_case : int = torch.load(_UpperCAmelCase ,map_location=torch.device('cpu' ) ) __snake_case : List[Any] = OrderedDict() __snake_case : Union[str, Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __snake_case : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase ) __snake_case : Tuple = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __snake_case : Optional[int] = list_of_state_dict + attention(_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : str = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __snake_case : List[str] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": A__ : Dict = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=3_8_4, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) A__ : Tuple = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
0
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices A__ : List[Any] = logging.get_logger(__name__) class snake_case__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): A__ = '''maskformer-swin''' A__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : Any , __a : Union[str, Any]=224 , __a : Optional[int]=4 , __a : Optional[int]=3 , __a : Any=96 , __a : int=[2, 2, 6, 2] , __a : Tuple=[3, 6, 12, 24] , __a : Tuple=7 , __a : List[Any]=4.0 , __a : Any=True , __a : Dict=0.0 , __a : Optional[Any]=0.0 , __a : Dict=0.1 , __a : Union[str, Any]="gelu" , __a : Union[str, Any]=False , __a : Any=0.0_2 , __a : Union[str, Any]=1e-5 , __a : List[str]=None , __a : Dict=None , **__a : List[Any] , ) -> str: '''simple docstring''' super().__init__(**__a ) __snake_case : Optional[int] = image_size __snake_case : Any = patch_size __snake_case : List[str] = num_channels __snake_case : List[Any] = embed_dim __snake_case : Union[str, Any] = depths __snake_case : Dict = len(__a ) __snake_case : Optional[int] = num_heads __snake_case : Union[str, Any] = window_size __snake_case : Optional[Any] = mlp_ratio __snake_case : int = qkv_bias __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Any = attention_probs_dropout_prob __snake_case : Dict = drop_path_rate __snake_case : List[str] = hidden_act __snake_case : int = use_absolute_embeddings __snake_case : Dict = layer_norm_eps __snake_case : Tuple = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __snake_case : Dict = int(embed_dim * 2 ** (len(__a ) - 1) ) __snake_case : Union[str, Any] = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(__a ) + 1 )] __snake_case , __snake_case : Optional[int] = get_aligned_output_features_output_indices( out_features=__a , out_indices=__a , stage_names=self.stage_names )
0
'''simple docstring''' from __future__ import annotations A__ : List[Any] = list[list[int]] # assigning initial values to the grid A__ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution A__ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( _UpperCAmelCase : Matrix ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( _UpperCAmelCase : Matrix ) -> Matrix | None: if location := find_empty_location(_UpperCAmelCase ): __snake_case , __snake_case : Optional[int] = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 ,10 ): if is_safe(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ): __snake_case : Union[str, Any] = digit if sudoku(_UpperCAmelCase ) is not None: return grid __snake_case : Optional[Any] = 0 return None def a_ ( _UpperCAmelCase : Matrix ) -> None: for row in grid: for cell in row: print(_UpperCAmelCase ,end=' ' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 2_0) print_solution(example_grid) print('''\nExample grid solution:''') A__ : List[str] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
0
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : str = logging.get_logger(__name__) A__ : Dict = { '''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''', # See all CANINE models at https://huggingface.co/models?filter=canine } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''canine''' def __init__( self : Optional[int] , __a : Union[str, Any]=768 , __a : Optional[int]=12 , __a : Optional[int]=12 , __a : List[str]=3072 , __a : List[str]="gelu" , __a : Union[str, Any]=0.1 , __a : str=0.1 , __a : List[str]=16384 , __a : Union[str, Any]=16 , __a : List[Any]=0.0_2 , __a : List[Any]=1e-12 , __a : List[str]=0 , __a : Dict=0Xe_0_0_0 , __a : List[str]=0Xe_0_0_1 , __a : Union[str, Any]=4 , __a : Dict=4 , __a : Optional[int]=8 , __a : Tuple=16384 , __a : Optional[int]=128 , **__a : int , ) -> List[Any]: '''simple docstring''' super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) __snake_case : Union[str, Any] = max_position_embeddings __snake_case : Dict = hidden_size __snake_case : Union[str, Any] = num_hidden_layers __snake_case : Tuple = num_attention_heads __snake_case : List[Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Union[str, Any] = hidden_dropout_prob __snake_case : Optional[Any] = attention_probs_dropout_prob __snake_case : Optional[Any] = initializer_range __snake_case : int = type_vocab_size __snake_case : Any = layer_norm_eps # Character config: __snake_case : Optional[int] = downsampling_rate __snake_case : List[str] = upsampling_kernel_size __snake_case : List[Any] = num_hash_functions __snake_case : List[Any] = num_hash_buckets __snake_case : Any = local_transformer_stride
0
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = KandinskyVaaPriorPipeline A__ = ['''prompt'''] A__ = ['''prompt''', '''negative_prompt'''] A__ = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Dict ) -> List[str]: '''simple docstring''' return 32 @property def A_ ( self : Any ) -> str: '''simple docstring''' return 32 @property def A_ ( self : str ) -> Optional[int]: '''simple docstring''' return self.time_input_dim @property def A_ ( self : str ) -> int: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return 100 @property def A_ ( self : Tuple ) -> List[str]: '''simple docstring''' __snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__a ) @property def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Any = { 'num_attention_heads': 2, 'attention_head_dim': 12, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } __snake_case : List[Any] = PriorTransformer(**__a ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __snake_case : Any = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __snake_case : Optional[Any] = CLIPVisionModelWithProjection(__a ) return model @property def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : Dict = CLIPImageProcessor( crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = self.dummy_prior __snake_case : List[str] = self.dummy_image_encoder __snake_case : str = self.dummy_text_encoder __snake_case : List[str] = self.dummy_tokenizer __snake_case : List[str] = self.dummy_image_processor __snake_case : Any = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=1_0.0 , ) __snake_case : str = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def A_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple=0 ) -> Any: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : List[str] = torch.manual_seed(__a ) else: __snake_case : List[str] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : List[Any] = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def A_ ( self : str ) -> Dict: '''simple docstring''' __snake_case : str = 'cpu' __snake_case : List[str] = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**__a ) __snake_case : Optional[Any] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : List[str] = output.image_embeds __snake_case : str = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] __snake_case : Union[str, Any] = image[0, -10:] __snake_case : Any = image_from_tuple[0, -10:] assert image.shape == (1, 32) __snake_case : List[Any] = np.array( [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def A_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = torch_device == 'cpu' __snake_case : Dict = True __snake_case : Union[str, Any] = False self._test_inference_batch_single_identical( test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , ) @skip_mps def A_ ( self : str ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = torch_device == 'cpu' __snake_case : Optional[Any] = False self._test_attention_slicing_forward_pass( test_max_difference=__a , test_mean_pixel_difference=__a , )
0
1
'''simple docstring''' import collections import importlib.util import os import re from pathlib import Path A__ : str = '''src/transformers''' # Matches is_xxx_available() A__ : str = re.compile(R'''is\_([a-z_]*)_available()''') # Catches a one-line _import_struct = {xxx} A__ : List[str] = re.compile(R'''^_import_structure\s+=\s+\{([^\}]+)\}''') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] A__ : Any = re.compile(R'''\s+"\S*":\s+\[([^\]]*)\]''') # Catches a line if not is_foo_available A__ : Any = re.compile(R'''^\s*if\s+not\s+is\_[a-z_]*\_available\(\)''') # Catches a line _import_struct["bla"].append("foo") A__ : Union[str, Any] = re.compile(R'''^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)''') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] A__ : List[str] = re.compile(R'''^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]''') # Catches a line with an object between quotes and a comma: "MyModel", A__ : str = re.compile('''^\s+"([^"]+)",''') # Catches a line with objects between brackets only: ["foo", "bar"], A__ : str = re.compile('''^\s+\[([^\]]+)\]''') # Catches a line with from foo import bar, bla, boo A__ : Union[str, Any] = re.compile(R'''\s+from\s+\S*\s+import\s+([^\(\s].*)\n''') # Catches a line with try: A__ : str = re.compile(R'''^\s*try:''') # Catches a line with else: A__ : Optional[int] = re.compile(R'''^\s*else:''') def a_ ( _UpperCAmelCase : Union[str, Any] ) -> int: if _re_test_backend.search(_UpperCAmelCase ) is None: return None __snake_case : Dict = [b[0] for b in _re_backend.findall(_UpperCAmelCase )] backends.sort() return "_and_".join(_UpperCAmelCase ) def a_ ( _UpperCAmelCase : Tuple ) -> Dict: with open(_UpperCAmelCase ,'r' ,encoding='utf-8' ,newline='\n' ) as f: __snake_case : Dict = f.readlines() __snake_case : Optional[int] = 0 while line_index < len(_UpperCAmelCase ) and not lines[line_index].startswith('_import_structure = {' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_UpperCAmelCase ): return None # First grab the objects without a specific backend in _import_structure __snake_case : Dict = [] while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None: __snake_case : Tuple = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_UpperCAmelCase ): __snake_case : Tuple = _re_one_line_import_struct.search(_UpperCAmelCase ).groups()[0] __snake_case : str = re.findall('\[([^\]]+)\]' ,_UpperCAmelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(', ' )] ) line_index += 1 continue __snake_case : int = _re_import_struct_key_value.search(_UpperCAmelCase ) if single_line_import_search is not None: __snake_case : Dict = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(_UpperCAmelCase ) > 0] objects.extend(_UpperCAmelCase ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) line_index += 1 __snake_case : Union[str, Any] = {'none': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('if TYPE_CHECKING' ): # If the line is an if not is_backend_available, we grab all objects associated. __snake_case : Tuple = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __snake_case : List[str] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __snake_case : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ): __snake_case : List[str] = lines[line_index] if _re_import_struct_add_one.search(_UpperCAmelCase ) is not None: objects.append(_re_import_struct_add_one.search(_UpperCAmelCase ).groups()[0] ) elif _re_import_struct_add_many.search(_UpperCAmelCase ) is not None: __snake_case : Any = _re_import_struct_add_many.search(_UpperCAmelCase ).groups()[0].split(', ' ) __snake_case : Optional[Any] = [obj[1:-1] for obj in imports if len(_UpperCAmelCase ) > 0] objects.extend(_UpperCAmelCase ) elif _re_between_brackets.search(_UpperCAmelCase ) is not None: __snake_case : List[Any] = _re_between_brackets.search(_UpperCAmelCase ).groups()[0].split(', ' ) __snake_case : List[Any] = [obj[1:-1] for obj in imports if len(_UpperCAmelCase ) > 0] objects.extend(_UpperCAmelCase ) elif _re_quote_object.search(_UpperCAmelCase ) is not None: objects.append(_re_quote_object.search(_UpperCAmelCase ).groups()[0] ) elif line.startswith(' ' * 8 + '"' ): objects.append(line[9:-3] ) elif line.startswith(' ' * 12 + '"' ): objects.append(line[13:-3] ) line_index += 1 __snake_case : Tuple = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __snake_case : str = [] while ( line_index < len(_UpperCAmelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('else' ) ): __snake_case : Any = lines[line_index] __snake_case : Tuple = _re_import.search(_UpperCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 8 ): objects.append(line[8:-2] ) line_index += 1 __snake_case : List[str] = {'none': objects} # Let's continue with backend-specific objects while line_index < len(_UpperCAmelCase ): # If the line is an if is_backend_available, we grab all objects associated. __snake_case : List[Any] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __snake_case : Dict = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __snake_case : Optional[Any] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ): __snake_case : Optional[int] = lines[line_index] __snake_case : Optional[int] = _re_import.search(_UpperCAmelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(', ' ) ) elif line.startswith(' ' * 12 ): objects.append(line[12:-2] ) line_index += 1 __snake_case : Optional[Any] = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : List[Any] ) -> Dict: def find_duplicates(_UpperCAmelCase : Tuple ): return [k for k, v in collections.Counter(_UpperCAmelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __snake_case : Optional[Any] = [] for key in import_dict_objects.keys(): __snake_case : List[Any] = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) __snake_case : Optional[int] = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __snake_case : Tuple = 'base imports' if key == 'none' else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a_ ( ) -> int: __snake_case : Tuple = [] for root, _, files in os.walk(_UpperCAmelCase ): if "__init__.py" in files: __snake_case : Any = os.path.join(_UpperCAmelCase ,'__init__.py' ) __snake_case : Union[str, Any] = parse_init(_UpperCAmelCase ) if objects is not None: __snake_case : Tuple = analyze_results(*_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: __snake_case : Any = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append('\n'.join(_UpperCAmelCase ) ) if len(_UpperCAmelCase ) > 0: raise ValueError('\n\n'.join(_UpperCAmelCase ) ) def a_ ( ) -> Any: __snake_case : Optional[int] = [] for path, directories, files in os.walk(_UpperCAmelCase ): for folder in directories: # Ignore private modules if folder.startswith('_' ): directories.remove(_UpperCAmelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_UpperCAmelCase ) / folder).glob('*.py' ) ) ) == 0: continue __snake_case : Dict = str((Path(_UpperCAmelCase ) / folder).relative_to(_UpperCAmelCase ) ) __snake_case : Any = short_path.replace(os.path.sep ,'.' ) submodules.append(_UpperCAmelCase ) for fname in files: if fname == "__init__.py": continue __snake_case : Any = str((Path(_UpperCAmelCase ) / fname).relative_to(_UpperCAmelCase ) ) __snake_case : Any = short_path.replace('.py' ,'' ).replace(os.path.sep ,'.' ) if len(submodule.split('.' ) ) == 1: submodules.append(_UpperCAmelCase ) return submodules A__ : Tuple = [ '''convert_pytorch_checkpoint_to_tf2''', '''modeling_flax_pytorch_utils''', ] def a_ ( ) -> Any: # This is to make sure the transformers module imported is the one in the repo. __snake_case : Any = importlib.util.spec_from_file_location( 'transformers' ,os.path.join(_UpperCAmelCase ,'__init__.py' ) ,submodule_search_locations=[PATH_TO_TRANSFORMERS] ,) __snake_case : Tuple = spec.loader.load_module() __snake_case : Optional[int] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(_UpperCAmelCase ) > 0: __snake_case : Any = '\n'.join(f'''- {module}''' for module in module_not_registered ) raise ValueError( 'The following submodules are not properly registered in the main init of Transformers:\n' f'''{list_of_modules}\n''' 'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' ) if __name__ == "__main__": check_all_inits() check_submodules()
0
'''simple docstring''' from math import factorial A__ : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)} def a_ ( _UpperCAmelCase : int ) -> int: if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): raise TypeError('Parameter number must be int' ) if number < 0: raise ValueError('Parameter number must be greater than or equal to 0' ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(_UpperCAmelCase ) ) def a_ ( _UpperCAmelCase : int = 60 ,_UpperCAmelCase : int = 1_00_00_00 ) -> int: if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): raise TypeError('Parameters chain_length and number_limit must be int' ) if chain_length <= 0 or number_limit <= 0: raise ValueError( 'Parameters chain_length and number_limit must be greater than 0' ) # the counter for the chains with the exact desired length __snake_case : List[str] = 0 # the cached sizes of the previous chains __snake_case : dict[int, int] = {} for start_chain_element in range(1 ,_UpperCAmelCase ): # The temporary set will contain the elements of the chain __snake_case : Optional[int] = set() __snake_case : List[Any] = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. __snake_case : str = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(_UpperCAmelCase ) chain_set_length += 1 __snake_case : Tuple = digit_factorial_sum(_UpperCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] __snake_case : Optional[Any] = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F"""{solution()}""")
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> bool: __snake_case : Union[str, Any] = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(2_7)) print(perfect_cube(4))
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int = 1_00 ) -> int: __snake_case : Any = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Union[str, Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
0
1
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Any = logging.get_logger(__name__) A__ : Any = { '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''donut-swin''' A__ = { '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self : str , __a : str=224 , __a : List[Any]=4 , __a : Optional[int]=3 , __a : Union[str, Any]=96 , __a : List[str]=[2, 2, 6, 2] , __a : List[str]=[3, 6, 12, 24] , __a : str=7 , __a : Tuple=4.0 , __a : Tuple=True , __a : Any=0.0 , __a : List[str]=0.0 , __a : Union[str, Any]=0.1 , __a : Optional[Any]="gelu" , __a : List[Any]=False , __a : List[Any]=0.0_2 , __a : List[str]=1e-5 , **__a : Any , ) -> Optional[int]: '''simple docstring''' super().__init__(**__a ) __snake_case : Optional[Any] = image_size __snake_case : int = patch_size __snake_case : Optional[Any] = num_channels __snake_case : List[Any] = embed_dim __snake_case : List[Any] = depths __snake_case : Dict = len(__a ) __snake_case : Dict = num_heads __snake_case : Any = window_size __snake_case : Tuple = mlp_ratio __snake_case : Any = qkv_bias __snake_case : str = hidden_dropout_prob __snake_case : int = attention_probs_dropout_prob __snake_case : List[str] = drop_path_rate __snake_case : str = hidden_act __snake_case : Union[str, Any] = use_absolute_embeddings __snake_case : Optional[Any] = layer_norm_eps __snake_case : List[str] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __snake_case : int = int(embed_dim * 2 ** (len(__a ) - 1) )
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A__ : int = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Tuple = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys A__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
1
'''simple docstring''' import collections.abc from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_poolformer import PoolFormerConfig A__ : Optional[Any] = logging.get_logger(__name__) # General docstring A__ : List[str] = '''PoolFormerConfig''' # Base docstring A__ : List[Any] = '''sail/poolformer_s12''' A__ : Optional[int] = [1, 5_1_2, 7, 7] # Image classification docstring A__ : str = '''sail/poolformer_s12''' A__ : List[Any] = '''tabby, tabby cat''' A__ : Union[str, Any] = [ '''sail/poolformer_s12''', # See all PoolFormer models at https://huggingface.co/models?filter=poolformer ] def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : float = 0.0 ,_UpperCAmelCase : bool = False ) -> int: if drop_prob == 0.0 or not training: return input __snake_case : List[str] = 1 - drop_prob __snake_case : int = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets __snake_case : Optional[Any] = keep_prob + torch.rand(_UpperCAmelCase ,dtype=input.dtype ,device=input.device ) random_tensor.floor_() # binarize __snake_case : Any = input.div(_UpperCAmelCase ) * random_tensor return output class snake_case__ ( nn.Module ): def __init__( self : str , __a : Optional[float] = None ) -> None: '''simple docstring''' super().__init__() __snake_case : int = drop_prob def A_ ( self : Any , __a : torch.Tensor ) -> torch.Tensor: '''simple docstring''' return drop_path(__a , self.drop_prob , self.training ) def A_ ( self : Any ) -> str: '''simple docstring''' return "p={}".format(self.drop_prob ) class snake_case__ ( nn.Module ): def __init__( self : Optional[int] , __a : int , __a : int , __a : List[str] , __a : Dict , __a : Dict , __a : str=None ) -> Tuple: '''simple docstring''' super().__init__() __snake_case : Union[str, Any] = patch_size if isinstance(__a , collections.abc.Iterable ) else (patch_size, patch_size) __snake_case : int = stride if isinstance(__a , collections.abc.Iterable ) else (stride, stride) __snake_case : str = padding if isinstance(__a , collections.abc.Iterable ) else (padding, padding) __snake_case : str = nn.Convad(__a , __a , kernel_size=__a , stride=__a , padding=__a ) __snake_case : Tuple = norm_layer(__a ) if norm_layer else nn.Identity() def A_ ( self : Optional[Any] , __a : int ) -> str: '''simple docstring''' __snake_case : str = self.projection(__a ) __snake_case : int = self.norm(__a ) return embeddings class snake_case__ ( nn.GroupNorm ): def __init__( self : List[Any] , __a : List[str] , **__a : Any ) -> Optional[Any]: '''simple docstring''' super().__init__(1 , __a , **__a ) class snake_case__ ( nn.Module ): def __init__( self : str , __a : Dict ) -> Any: '''simple docstring''' super().__init__() __snake_case : Dict = nn.AvgPoolad(__a , stride=1 , padding=pool_size // 2 , count_include_pad=__a ) def A_ ( self : Any , __a : Dict ) -> List[Any]: '''simple docstring''' return self.pool(__a ) - hidden_states class snake_case__ ( nn.Module ): def __init__( self : Dict , __a : Union[str, Any] , __a : Tuple , __a : List[str] , __a : int ) -> Optional[Any]: '''simple docstring''' super().__init__() __snake_case : str = nn.Convad(__a , __a , 1 ) __snake_case : Optional[int] = nn.Convad(__a , __a , 1 ) __snake_case : Union[str, Any] = PoolFormerDropPath(__a ) if isinstance(config.hidden_act , __a ): __snake_case : Union[str, Any] = ACTaFN[config.hidden_act] else: __snake_case : Dict = config.hidden_act def A_ ( self : List[Any] , __a : List[str] ) -> Optional[Any]: '''simple docstring''' __snake_case : int = self.conva(__a ) __snake_case : Tuple = self.act_fn(__a ) __snake_case : Optional[Any] = self.drop(__a ) __snake_case : Optional[int] = self.conva(__a ) __snake_case : int = self.drop(__a ) return hidden_states class snake_case__ ( nn.Module ): def __init__( self : str , __a : Dict , __a : Optional[int] , __a : Any , __a : Any , __a : List[str] , __a : Dict ) -> List[Any]: '''simple docstring''' super().__init__() __snake_case : Tuple = PoolFormerPooling(__a ) __snake_case : Dict = PoolFormerOutput(__a , __a , __a , __a ) __snake_case : str = PoolFormerGroupNorm(__a ) __snake_case : Optional[int] = PoolFormerGroupNorm(__a ) # Useful for training neural nets __snake_case : Dict = PoolFormerDropPath(__a ) if drop_path > 0.0 else nn.Identity() __snake_case : Union[str, Any] = config.use_layer_scale if config.use_layer_scale: __snake_case : List[str] = nn.Parameter( config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a ) __snake_case : Any = nn.Parameter( config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a ) def A_ ( self : Any , __a : List[str] ) -> Dict: '''simple docstring''' if self.use_layer_scale: __snake_case : Union[str, Any] = self.pooling(self.before_norm(__a ) ) __snake_case : Union[str, Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output # First residual connection __snake_case : Tuple = hidden_states + self.drop_path(__a ) __snake_case : Union[str, Any] = () __snake_case : Optional[Any] = self.output(self.after_norm(__a ) ) __snake_case : str = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output # Second residual connection __snake_case : Optional[Any] = hidden_states + self.drop_path(__a ) __snake_case : str = (output,) + outputs return outputs else: __snake_case : List[Any] = self.drop_path(self.pooling(self.before_norm(__a ) ) ) # First residual connection __snake_case : List[Any] = pooling_output + hidden_states __snake_case : Optional[Any] = () # Second residual connection inside the PoolFormerOutput block __snake_case : Union[str, Any] = self.drop_path(self.output(self.after_norm(__a ) ) ) __snake_case : Union[str, Any] = hidden_states + layer_output __snake_case : Optional[Any] = (output,) + outputs return outputs class snake_case__ ( nn.Module ): def __init__( self : Union[str, Any] , __a : str ) -> Dict: '''simple docstring''' super().__init__() __snake_case : Union[str, Any] = config # stochastic depth decay rule __snake_case : List[Any] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )] # patch embeddings __snake_case : List[str] = [] for i in range(config.num_encoder_blocks ): embeddings.append( PoolFormerEmbeddings( patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) ) __snake_case : List[Any] = nn.ModuleList(__a ) # Transformer blocks __snake_case : Dict = [] __snake_case : Optional[Any] = 0 for i in range(config.num_encoder_blocks ): # each block consists of layers __snake_case : Optional[Any] = [] if i != 0: cur += config.depths[i - 1] for j in range(config.depths[i] ): layers.append( PoolFormerLayer( __a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) ) blocks.append(nn.ModuleList(__a ) ) __snake_case : Dict = nn.ModuleList(__a ) def A_ ( self : Union[str, Any] , __a : str , __a : Optional[Any]=False , __a : List[Any]=True ) -> int: '''simple docstring''' __snake_case : List[str] = () if output_hidden_states else None __snake_case : Any = pixel_values for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ): __snake_case , __snake_case : str = layers # Get patch embeddings from hidden_states __snake_case : List[Any] = embedding_layer(__a ) # Send the embeddings through the blocks for _, blk in enumerate(__a ): __snake_case : str = blk(__a ) __snake_case : Tuple = layer_outputs[0] if output_hidden_states: __snake_case : List[str] = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states] if v is not None ) return BaseModelOutputWithNoAttention(last_hidden_state=__a , hidden_states=__a ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = PoolFormerConfig A__ = '''poolformer''' A__ = '''pixel_values''' A__ = True def A_ ( self : List[str] , __a : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.LayerNorm ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) def A_ ( self : int , __a : Optional[Any] , __a : Dict=False ) -> Tuple: '''simple docstring''' if isinstance(__a , __a ): __snake_case : List[Any] = value A__ : str = R''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' A__ : Dict = R''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`PoolFormerImageProcessor.__call__`] for details. ''' @add_start_docstrings( '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE_ , ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : int , __a : List[str] ) -> List[str]: '''simple docstring''' super().__init__(__a ) __snake_case : int = config __snake_case : Optional[int] = PoolFormerEncoder(__a ) # Initialize weights and apply final processing self.post_init() def A_ ( self : List[Any] ) -> List[str]: '''simple docstring''' return self.embeddings.patch_embeddings @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def A_ ( self : Optional[Any] , __a : Optional[torch.FloatTensor] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]: '''simple docstring''' __snake_case : str = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __snake_case : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __snake_case : str = self.encoder( __a , output_hidden_states=__a , return_dict=__a , ) __snake_case : Union[str, Any] = encoder_outputs[0] if not return_dict: return (sequence_output, None) + encoder_outputs[1:] return BaseModelOutputWithNoAttention( last_hidden_state=__a , hidden_states=encoder_outputs.hidden_states , ) class snake_case__ ( nn.Module ): def __init__( self : Optional[int] , __a : List[str] ) -> Optional[Any]: '''simple docstring''' super().__init__() __snake_case : str = nn.Linear(config.hidden_size , config.hidden_size ) def A_ ( self : Dict , __a : Optional[Any] ) -> Tuple: '''simple docstring''' __snake_case : Any = self.dense(__a ) return output @add_start_docstrings( ''' PoolFormer Model transformer with an image classification head on top ''' , SCREAMING_SNAKE_CASE_ , ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Union[str, Any] , __a : str ) -> List[str]: '''simple docstring''' super().__init__(__a ) __snake_case : Any = config.num_labels __snake_case : List[str] = PoolFormerModel(__a ) # Final norm __snake_case : Union[str, Any] = PoolFormerGroupNorm(config.hidden_sizes[-1] ) # Classifier head __snake_case : Tuple = ( nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def A_ ( self : Tuple , __a : Optional[torch.FloatTensor] = None , __a : Optional[torch.LongTensor] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]: '''simple docstring''' __snake_case : Tuple = return_dict if return_dict is not None else self.config.use_return_dict __snake_case : List[str] = self.poolformer( __a , output_hidden_states=__a , return_dict=__a , ) __snake_case : Optional[Any] = outputs[0] __snake_case : str = self.classifier(self.norm(__a ).mean([-2, -1] ) ) __snake_case : Tuple = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __snake_case : List[str] = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __snake_case : Dict = 'single_label_classification' else: __snake_case : Tuple = 'multi_label_classification' if self.config.problem_type == "regression": __snake_case : Any = MSELoss() if self.num_labels == 1: __snake_case : Dict = loss_fct(logits.squeeze() , labels.squeeze() ) else: __snake_case : Any = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __snake_case : int = CrossEntropyLoss() __snake_case : Tuple = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __snake_case : Optional[Any] = BCEWithLogitsLoss() __snake_case : str = loss_fct(__a , __a ) if not return_dict: __snake_case : Dict = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention(loss=__a , logits=__a , hidden_states=outputs.hidden_states )
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ShapEPipeline A__ = ['''prompt'''] A__ = ['''prompt'''] A__ = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Optional[Any] ) -> str: '''simple docstring''' return 32 @property def A_ ( self : str ) -> Optional[int]: '''simple docstring''' return 32 @property def A_ ( self : Tuple ) -> List[Any]: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : Tuple ) -> Dict: '''simple docstring''' return 8 @property def A_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' __snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__a ) @property def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Dict = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __snake_case : Optional[Any] = PriorTransformer(**__a ) return model @property def A_ ( self : Dict ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Tuple = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __snake_case : Optional[int] = ShapERenderer(**__a ) return model def A_ ( self : Tuple ) -> Tuple: '''simple docstring''' __snake_case : Tuple = self.dummy_prior __snake_case : Union[str, Any] = self.dummy_text_encoder __snake_case : List[str] = self.dummy_tokenizer __snake_case : Optional[Any] = self.dummy_renderer __snake_case : List[Any] = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , ) __snake_case : int = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def A_ ( self : Union[str, Any] , __a : Dict , __a : int=0 ) -> Optional[Any]: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : List[str] = torch.manual_seed(__a ) else: __snake_case : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : Optional[int] = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def A_ ( self : List[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = 'cpu' __snake_case : Dict = self.get_dummy_components() __snake_case : int = self.pipeline_class(**__a ) __snake_case : str = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : Dict = output.images[0] __snake_case : int = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __snake_case : str = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A_ ( self : Any ) -> List[str]: '''simple docstring''' # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A_ ( self : int ) -> Tuple: '''simple docstring''' __snake_case : int = torch_device == 'cpu' __snake_case : str = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__a , relax_max_difference=__a , ) def A_ ( self : List[str] ) -> Dict: '''simple docstring''' __snake_case : str = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**__a ) __snake_case : Dict = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : int = 1 __snake_case : Tuple = 2 __snake_case : Tuple = self.get_dummy_inputs(__a ) for key in inputs.keys(): if key in self.batch_params: __snake_case : Union[str, Any] = batch_size * [inputs[key]] __snake_case : str = pipe(**__a , num_images_per_prompt=__a )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def A_ ( self : str ) -> Dict: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) __snake_case : Union[str, Any] = ShapEPipeline.from_pretrained('openai/shap-e' ) __snake_case : Any = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[int] = torch.Generator(device=__a ).manual_seed(0 ) __snake_case : Union[str, Any] = pipe( 'a shark' , generator=__a , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__a , __a )
0
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL A__ : int = logging.get_logger(__name__) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = ['''pixel_values'''] def __init__( self : Tuple , __a : bool = True , __a : Dict[str, int] = None , __a : int = 0.9 , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , **__a : Dict , ) -> None: '''simple docstring''' super().__init__(**__a ) __snake_case : str = size if size is not None else {'shortest_edge': 224} __snake_case : List[str] = get_size_dict(__a , default_to_square=__a ) __snake_case : Tuple = crop_size if crop_size is not None else {'height': 224, 'width': 224} __snake_case : Optional[int] = get_size_dict(__a , param_name='crop_size' ) __snake_case : List[str] = do_resize __snake_case : int = size __snake_case : Dict = crop_pct __snake_case : List[Any] = resample __snake_case : List[str] = do_center_crop __snake_case : Optional[int] = crop_size __snake_case : str = do_rescale __snake_case : Union[str, Any] = rescale_factor __snake_case : Any = do_normalize __snake_case : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN __snake_case : Optional[int] = image_std if image_std is not None else IMAGENET_DEFAULT_STD def A_ ( self : Dict , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[float] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray: '''simple docstring''' __snake_case : Dict = get_size_dict(__a , default_to_square=__a ) if "shortest_edge" not in size and ("height" not in size or "width" not in size): raise ValueError(f'''size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' ) if crop_pct is not None: if "shortest_edge" in size: __snake_case : List[Any] = int(size['shortest_edge'] / crop_pct ) elif "height" in size and "width" in size: if size["height"] == size["width"]: __snake_case : int = int(size['height'] / crop_pct ) else: __snake_case : List[str] = (int(size['height'] / crop_pct ), int(size['width'] / crop_pct )) else: raise ValueError('Invalid size for resize: {}'.format(__a ) ) __snake_case : Optional[Any] = get_resize_output_image_size(__a , size=__a , default_to_square=__a ) else: if "shortest_edge" in size: __snake_case : Dict = get_resize_output_image_size(__a , size=size['shortest_edge'] , default_to_square=__a ) elif "height" in size and "width" in size: __snake_case : int = (size['height'], size['width']) else: raise ValueError('Invalid size for resize: {}'.format(__a ) ) return resize(__a , size=__a , resample=__a , data_format=__a , **__a ) def A_ ( self : Optional[Any] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ) -> np.ndarray: '''simple docstring''' __snake_case : Dict = get_size_dict(__a ) if "height" not in size or "width" not in size: raise ValueError(f'''size must contain \'height\' and \'width\' as keys. Got {size.keys()}''' ) return center_crop(__a , size=(size['height'], size['width']) , data_format=__a , **__a ) def A_ ( self : Tuple , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> Any: '''simple docstring''' return rescale(__a , scale=__a , data_format=__a , **__a ) def A_ ( self : Tuple , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ) -> np.ndarray: '''simple docstring''' return normalize(__a , mean=__a , std=__a , data_format=__a , **__a ) def A_ ( self : Optional[Any] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : int = None , __a : PILImageResampling = None , __a : bool = None , __a : Dict[str, int] = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[str, TensorType]] = None , __a : ChannelDimension = ChannelDimension.FIRST , **__a : Union[str, Any] , ) -> PIL.Image.Image: '''simple docstring''' __snake_case : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __snake_case : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct __snake_case : Any = resample if resample is not None else self.resample __snake_case : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case : List[Any] = do_rescale if do_rescale is not None else self.do_rescale __snake_case : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize __snake_case : int = image_mean if image_mean is not None else self.image_mean __snake_case : str = image_std if image_std is not None else self.image_std __snake_case : int = size if size is not None else self.size __snake_case : Optional[int] = get_size_dict(__a , default_to_square=__a ) __snake_case : Tuple = crop_size if crop_size is not None else self.crop_size __snake_case : Tuple = get_size_dict(__a , param_name='crop_size' ) __snake_case : Dict = make_list_of_images(__a ) if not valid_images(__a ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_center_crop and crop_pct is None: raise ValueError('Crop_pct must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. __snake_case : int = [to_numpy_array(__a ) for image in images] if do_resize: __snake_case : Dict = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a ) for image in images] if do_center_crop: __snake_case : Union[str, Any] = [self.center_crop(image=__a , size=__a ) for image in images] if do_rescale: __snake_case : int = [self.rescale(image=__a , scale=__a ) for image in images] if do_normalize: __snake_case : Union[str, Any] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images] __snake_case : List[Any] = [to_channel_dimension_format(__a , __a ) for image in images] __snake_case : Optional[int] = {'pixel_values': images} return BatchFeature(data=__a , tensor_type=__a )
0
'''simple docstring''' from __future__ import annotations import time import numpy as np A__ : str = [8, 5, 9, 7] A__ : List[str] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A__ : Dict = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class snake_case__ : def __init__( self : Union[str, Any] , __a : list[int] , __a : list[list[int]] , __a : list[list[int]] , ) -> None: '''simple docstring''' __snake_case : int = claim_vector __snake_case : Optional[int] = allocated_resources_table __snake_case : List[str] = maximum_claim_table def A_ ( self : str ) -> list[int]: '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def A_ ( self : int ) -> list[int]: '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def A_ ( self : int ) -> list[list[int]]: '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def A_ ( self : str ) -> dict[int, list[int]]: '''simple docstring''' return {self.__need().index(__a ): i for i in self.__need()} def A_ ( self : Union[str, Any] , **__a : int ) -> None: '''simple docstring''' __snake_case : str = self.__need() __snake_case : List[Any] = self.__allocated_resources_table __snake_case : Optional[int] = self.__available_resources() __snake_case : Union[str, Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: __snake_case : Tuple = False for each_need in need_list: __snake_case : Any = True for index, need in enumerate(__a ): if need > available_resources[index]: __snake_case : List[str] = False break if execution: __snake_case : Union[str, Any] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __snake_case : str = original_need_index print(f'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(__a ) # update available/freed resources stack __snake_case : Union[str, Any] = np.array(__a ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(__a ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( f'''P{self.__allocated_resources_table.index(__a ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( f'''P{self.__maximum_claim_table.index(__a ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(__a ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(__a ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
0
1
'''simple docstring''' import argparse import numpy as np import torch from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging logging.set_verbosity_info() A__ : Dict = logging.get_logger('''transformers.models.speecht5''') def a_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[str] ) -> int: hf_model.apply_weight_norm() __snake_case : List[Any] = checkpoint['input_conv.weight_g'] __snake_case : Optional[int] = checkpoint['input_conv.weight_v'] __snake_case : int = checkpoint['input_conv.bias'] for i in range(len(config.upsample_rates ) ): __snake_case : List[Any] = checkpoint[f'''upsamples.{i}.1.weight_g'''] __snake_case : List[str] = checkpoint[f'''upsamples.{i}.1.weight_v'''] __snake_case : Any = checkpoint[f'''upsamples.{i}.1.bias'''] for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ): for j in range(len(config.resblock_dilation_sizes ) ): __snake_case : Any = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g'''] __snake_case : Dict = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v'''] __snake_case : Dict = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias'''] __snake_case : Optional[int] = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g'''] __snake_case : Any = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v'''] __snake_case : Optional[int] = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias'''] __snake_case : List[Any] = checkpoint['output_conv.1.weight_g'] __snake_case : Any = checkpoint['output_conv.1.weight_v'] __snake_case : List[Any] = checkpoint['output_conv.1.bias'] hf_model.remove_weight_norm() @torch.no_grad() def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Optional[int]=None ,_UpperCAmelCase : Dict=None ,) -> Dict: if config_path is not None: __snake_case : Tuple = SpeechTaHifiGanConfig.from_pretrained(_UpperCAmelCase ) else: __snake_case : Union[str, Any] = SpeechTaHifiGanConfig() __snake_case : Optional[Any] = SpeechTaHifiGan(_UpperCAmelCase ) __snake_case : Dict = torch.load(_UpperCAmelCase ) load_weights(orig_checkpoint['model']['generator'] ,_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : List[Any] = np.load(_UpperCAmelCase ) __snake_case : Optional[int] = stats[0].reshape(-1 ) __snake_case : Dict = stats[1].reshape(-1 ) __snake_case : str = torch.from_numpy(_UpperCAmelCase ).float() __snake_case : str = torch.from_numpy(_UpperCAmelCase ).float() model.save_pretrained(_UpperCAmelCase ) if repo_id: print('Pushing to the hub...' ) model.push_to_hub(_UpperCAmelCase ) if __name__ == "__main__": A__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''') parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) A__ : Optional[int] = parser.parse_args() convert_hifigan_checkpoint( args.checkpoint_path, args.stats_path, args.pytorch_dump_folder_path, args.config_path, args.push_to_hub, )
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer A__ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A__ : List[Any] = { '''vocab_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt''' ), '''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''', '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json''' ), '''google/electra-base-generator''': ( '''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json''' ), '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json''' ), }, } A__ : List[Any] = { '''google/electra-small-generator''': 5_1_2, '''google/electra-base-generator''': 5_1_2, '''google/electra-large-generator''': 5_1_2, '''google/electra-small-discriminator''': 5_1_2, '''google/electra-base-discriminator''': 5_1_2, '''google/electra-large-discriminator''': 5_1_2, } A__ : Optional[Any] = { '''google/electra-small-generator''': {'''do_lower_case''': True}, '''google/electra-base-generator''': {'''do_lower_case''': True}, '''google/electra-large-generator''': {'''do_lower_case''': True}, '''google/electra-small-discriminator''': {'''do_lower_case''': True}, '''google/electra-base-discriminator''': {'''do_lower_case''': True}, '''google/electra-large-discriminator''': {'''do_lower_case''': True}, } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_INIT_CONFIGURATION A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ElectraTokenizer def __init__( self : int , __a : List[Any]=None , __a : int=None , __a : List[str]=True , __a : Any="[UNK]" , __a : Any="[SEP]" , __a : Union[str, Any]="[PAD]" , __a : Dict="[CLS]" , __a : List[Any]="[MASK]" , __a : str=True , __a : Optional[int]=None , **__a : Optional[int] , ) -> str: '''simple docstring''' super().__init__( __a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , ) __snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , __a ) != do_lower_case or normalizer_state.get('strip_accents' , __a ) != strip_accents or normalizer_state.get('handle_chinese_chars' , __a ) != tokenize_chinese_chars ): __snake_case : List[Any] = getattr(__a , normalizer_state.pop('type' ) ) __snake_case : str = do_lower_case __snake_case : Optional[int] = strip_accents __snake_case : Any = tokenize_chinese_chars __snake_case : Union[str, Any] = normalizer_class(**__a ) __snake_case : Any = do_lower_case def A_ ( self : Any , __a : List[str] , __a : Optional[Any]=None ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A_ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' __snake_case : int = [self.sep_token_id] __snake_case : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self : Optional[int] , __a : str , __a : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' __snake_case : Tuple = self._tokenizer.model.save(__a , name=__a ) return tuple(__a )
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : int = 1_00 ) -> int: __snake_case : Any = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Union[str, Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> bool: __snake_case : Union[str, Any] = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(2_7)) print(perfect_cube(4))
0
1
'''simple docstring''' import sys import turtle def a_ ( _UpperCAmelCase : tuple[float, float] ,_UpperCAmelCase : tuple[float, float] ) -> tuple[float, float]: return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2 def a_ ( _UpperCAmelCase : tuple[float, float] ,_UpperCAmelCase : tuple[float, float] ,_UpperCAmelCase : tuple[float, float] ,_UpperCAmelCase : int ,) -> None: my_pen.up() my_pen.goto(vertexa[0] ,vertexa[1] ) my_pen.down() my_pen.goto(vertexa[0] ,vertexa[1] ) my_pen.goto(vertexa[0] ,vertexa[1] ) my_pen.goto(vertexa[0] ,vertexa[1] ) if depth == 0: return triangle(_UpperCAmelCase ,get_mid(_UpperCAmelCase ,_UpperCAmelCase ) ,get_mid(_UpperCAmelCase ,_UpperCAmelCase ) ,depth - 1 ) triangle(_UpperCAmelCase ,get_mid(_UpperCAmelCase ,_UpperCAmelCase ) ,get_mid(_UpperCAmelCase ,_UpperCAmelCase ) ,depth - 1 ) triangle(_UpperCAmelCase ,get_mid(_UpperCAmelCase ,_UpperCAmelCase ) ,get_mid(_UpperCAmelCase ,_UpperCAmelCase ) ,depth - 1 ) if __name__ == "__main__": if len(sys.argv) != 2: raise ValueError( '''Correct format for using this script: ''' '''python fractals.py <int:depth_for_fractal>''' ) A__ : str = turtle.Turtle() my_pen.ht() my_pen.speed(5) my_pen.pencolor('''red''') A__ : Dict = [(-1_7_5, -1_2_5), (0, 1_7_5), (1_7_5, -1_2_5)] # vertices of triangle triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
0
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss A__ : Tuple = pytest.mark.integration @require_faiss class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Dict = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(__a ) for x in np.arange(30 ).tolist()]} ) return dset def A_ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() __snake_case : Dict = dset.map( lambda __a , __a : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__a , keep_in_memory=__a ) __snake_case : List[Any] = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def A_ ( self : Tuple ) -> Any: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def A_ ( self : List[Any] ) -> Dict: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __snake_case , __snake_case : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def A_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(__a , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' from elasticsearch import Elasticsearch __snake_case : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __snake_case : Any = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __snake_case : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=__a ) __snake_case , __snake_case : str = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : str ) -> int: '''simple docstring''' import faiss __snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __snake_case : Dict = np.zeros(5 , dtype=np.floataa ) __snake_case : List[str] = 1 __snake_case , __snake_case : List[Any] = index.search(__a ) self.assertRaises(__a , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __snake_case : List[str] = np.eye(5 , dtype=np.floataa )[::-1] __snake_case , __snake_case : Dict = index.search_batch(__a ) self.assertRaises(__a , index.search_batch , queries[0] ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : List[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __a ) def A_ ( self : int ) -> int: '''simple docstring''' import faiss __snake_case : int = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __snake_case : List[str] = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__a ): __snake_case : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def A_ ( self : str ) -> Dict: '''simple docstring''' import faiss __snake_case : Tuple = faiss.IndexFlat(5 ) __snake_case : List[Any] = FaissIndex(custom_index=__a ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' import faiss __snake_case : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file: index.save(tmp_file.name ) __snake_case : List[Any] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __snake_case : List[Any] = np.zeros(5 , dtype=np.floataa ) __snake_case : Any = 1 __snake_case , __snake_case : int = index.search(__a ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def a_ ( _UpperCAmelCase : str ) -> Optional[int]: import faiss __snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 ,dtype=np.floataa ) ) __snake_case : Dict = 'index.faiss' __snake_case : Any = f'''mock://{index_name}''' index.save(_UpperCAmelCase ,storage_options=mockfs.storage_options ) __snake_case : Any = FaissIndex.load(_UpperCAmelCase ,storage_options=mockfs.storage_options ) __snake_case : Any = np.zeros(5 ,dtype=np.floataa ) __snake_case : Any = 1 __snake_case , __snake_case : Tuple = index.search(_UpperCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __snake_case : int = Elasticsearch() __snake_case : Dict = {'acknowledged': True} __snake_case : List[Any] = ElasticSearchIndex(es_client=__a ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __snake_case : Optional[Any] = 'foo' __snake_case : int = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case : List[Any] = index.search(__a ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __snake_case : Dict = 'foo' __snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case : Optional[Any] = index.search(__a , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __snake_case : List[Any] = ['foo', 'bar', 'foobar'] __snake_case : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case : Any = index.search_batch(__a ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([1, 1, 1] , __a ) # batched queries with timeout __snake_case : Tuple = ['foo', 'bar', 'foobar'] __snake_case : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case : int = index.search_batch(__a , request_timeout=30 ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : Dict = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([1, 1, 1] , __a )
0
1
'''simple docstring''' from typing import Callable, Optional from .. import Features from ..packaged_modules.generator.generator import Generator from .abc import AbstractDatasetInputStream class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : List[str] , __a : Callable , __a : Optional[Features] = None , __a : str = None , __a : bool = False , __a : bool = False , __a : Optional[dict] = None , __a : Optional[int] = None , **__a : Optional[int] , ) -> str: '''simple docstring''' super().__init__( features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , ) __snake_case : List[str] = Generator( cache_dir=__a , features=__a , generator=__a , gen_kwargs=__a , **__a , ) def A_ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' # Build iterable dataset if self.streaming: __snake_case : Tuple = self.builder.as_streaming_dataset(split='train' ) # Build regular (map-style) dataset else: __snake_case : Dict = None __snake_case : Tuple = None __snake_case : List[str] = None __snake_case : List[Any] = None self.builder.download_and_prepare( download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , ) __snake_case : List[str] = self.builder.as_dataset( split='train' , verification_mode=__a , in_memory=self.keep_in_memory ) return dataset
0
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging A__ : List[Any] = logging.get_logger(__name__) A__ : Tuple = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''t5''' A__ = ['''past_key_values'''] A__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , __a : Dict=32128 , __a : Dict=512 , __a : Union[str, Any]=64 , __a : str=2048 , __a : Union[str, Any]=6 , __a : Any=None , __a : Any=8 , __a : List[Any]=32 , __a : Any=128 , __a : Tuple=0.1 , __a : str=1e-6 , __a : Dict=1.0 , __a : Tuple="relu" , __a : Dict=True , __a : Union[str, Any]=True , __a : Any=0 , __a : Dict=1 , **__a : Union[str, Any] , ) -> Union[str, Any]: '''simple docstring''' __snake_case : int = vocab_size __snake_case : str = d_model __snake_case : str = d_kv __snake_case : List[Any] = d_ff __snake_case : List[str] = num_layers __snake_case : Tuple = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __snake_case : Union[str, Any] = num_heads __snake_case : Tuple = relative_attention_num_buckets __snake_case : Optional[int] = relative_attention_max_distance __snake_case : Optional[Any] = dropout_rate __snake_case : str = layer_norm_epsilon __snake_case : List[str] = initializer_factor __snake_case : int = feed_forward_proj __snake_case : Optional[Any] = use_cache __snake_case : Optional[Any] = self.feed_forward_proj.split('-' ) __snake_case : Dict = act_info[-1] __snake_case : List[str] = act_info[0] == 'gated' if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __snake_case : Dict = 'gelu_new' super().__init__( pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @property def A_ ( self : str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __snake_case : Union[str, Any] = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: __snake_case : Tuple = 'past_encoder_sequence + sequence' __snake_case : Dict = {0: 'batch'} __snake_case : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __snake_case : Tuple = {0: 'batch', 1: 'decoder_sequence'} __snake_case : int = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__a , direction='inputs' ) return common_inputs @property def A_ ( self : List[Any] ) -> int: '''simple docstring''' return 13
0
1
'''simple docstring''' import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A__ : List[str] = logging.get_logger(__name__) A__ : List[Any] = '''▁''' A__ : Optional[Any] = {'''vocab_file''': '''sentencepiece.bpe.model'''} A__ : Union[str, Any] = { '''vocab_file''': { '''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''', '''xlm-roberta-large-finetuned-conll02-dutch''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll02-spanish''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-english''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model''' ), '''xlm-roberta-large-finetuned-conll03-german''': ( '''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model''' ), } } A__ : Optional[Any] = { '''xlm-roberta-base''': 5_1_2, '''xlm-roberta-large''': 5_1_2, '''xlm-roberta-large-finetuned-conll02-dutch''': 5_1_2, '''xlm-roberta-large-finetuned-conll02-spanish''': 5_1_2, '''xlm-roberta-large-finetuned-conll03-english''': 5_1_2, '''xlm-roberta-large-finetuned-conll03-german''': 5_1_2, } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ['''input_ids''', '''attention_mask'''] def __init__( self : List[Any] , __a : int , __a : List[Any]="<s>" , __a : Optional[int]="</s>" , __a : int="</s>" , __a : Any="<s>" , __a : int="<unk>" , __a : Optional[int]="<pad>" , __a : Optional[Any]="<mask>" , __a : Optional[Dict[str, Any]] = None , **__a : Dict , ) -> None: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it __snake_case : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token __snake_case : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , sp_model_kwargs=self.sp_model_kwargs , **__a , ) __snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__a ) ) __snake_case : Any = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __snake_case : Dict = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __snake_case : Union[str, Any] = 1 __snake_case : Any = len(self.sp_model ) + self.fairseq_offset __snake_case : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self : Tuple ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = self.__dict__.copy() __snake_case : Any = None __snake_case : Dict = self.sp_model.serialized_model_proto() return state def __setstate__( self : Dict , __a : str ) -> List[Any]: '''simple docstring''' __snake_case : Any = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __snake_case : Union[str, Any] = {} __snake_case : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def A_ ( self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : List[str] = [self.cls_token_id] __snake_case : Tuple = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A_ ( self : Any , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) if token_ids_a is None: return [1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1] def A_ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' __snake_case : Union[str, Any] = [self.sep_token_id] __snake_case : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def A_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case : int = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def A_ ( self : Optional[Any] , __a : str ) -> List[str]: '''simple docstring''' return self.sp_model.encode(__a , out_type=__a ) def A_ ( self : Tuple , __a : Dict ) -> Any: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __snake_case : Optional[int] = self.sp_model.PieceToId(__a ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def A_ ( self : Optional[Any] , __a : Union[str, Any] ) -> Dict: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def A_ ( self : Tuple , __a : Any ) -> Any: '''simple docstring''' __snake_case : Any = ''.join(__a ).replace(__a , ' ' ).strip() return out_string def A_ ( self : Dict , __a : str , __a : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__a ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __snake_case : Any = os.path.join( __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __a ) elif not os.path.isfile(self.vocab_file ): with open(__a , 'wb' ) as fi: __snake_case : str = self.sp_model.serialized_model_proto() fi.write(__a ) return (out_vocab_file,)
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : Optional[int] = {} class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''llama''' A__ = ['''past_key_values'''] def __init__( self : Any , __a : List[str]=32000 , __a : Union[str, Any]=4096 , __a : Optional[Any]=11008 , __a : Any=32 , __a : str=32 , __a : Optional[int]=None , __a : Dict="silu" , __a : Dict=2048 , __a : List[str]=0.0_2 , __a : Union[str, Any]=1e-6 , __a : Dict=True , __a : List[str]=0 , __a : Tuple=1 , __a : Tuple=2 , __a : Optional[Any]=1 , __a : Any=False , __a : Tuple=None , **__a : List[Any] , ) -> Optional[int]: '''simple docstring''' __snake_case : str = vocab_size __snake_case : List[str] = max_position_embeddings __snake_case : List[Any] = hidden_size __snake_case : Union[str, Any] = intermediate_size __snake_case : Optional[int] = num_hidden_layers __snake_case : List[Any] = num_attention_heads # for backward compatibility if num_key_value_heads is None: __snake_case : Optional[int] = num_attention_heads __snake_case : Optional[Any] = num_key_value_heads __snake_case : int = hidden_act __snake_case : Any = initializer_range __snake_case : Any = rms_norm_eps __snake_case : Union[str, Any] = pretraining_tp __snake_case : Optional[int] = use_cache __snake_case : Any = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , ) def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' f'''got {self.rope_scaling}''' ) __snake_case : Optional[Any] = self.rope_scaling.get('type' , __a ) __snake_case : Tuple = self.rope_scaling.get('factor' , __a ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
0
1
'''simple docstring''' import os from typing import BinaryIO, Optional, Union import numpy as np import pyarrow.parquet as pq from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config from ..features.features import FeatureType, _visit from ..formatting import query_table from ..packaged_modules import _PACKAGED_DATASETS_MODULES from ..packaged_modules.parquet.parquet import Parquet from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader def a_ ( _UpperCAmelCase : Features ) -> Optional[int]: __snake_case : str = np.inf def set_batch_size(_UpperCAmelCase : FeatureType ) -> None: nonlocal batch_size if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): __snake_case : Any = min(_UpperCAmelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS ) elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ): __snake_case : List[str] = min(_UpperCAmelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS ) elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ) and feature.dtype == "binary": __snake_case : Optional[int] = min(_UpperCAmelCase ,config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS ) _visit(_UpperCAmelCase ,_UpperCAmelCase ) return None if batch_size is np.inf else batch_size class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Optional[Any] , __a : NestedDataStructureLike[PathLike] , __a : Optional[NamedSplit] = None , __a : Optional[Features] = None , __a : str = None , __a : bool = False , __a : bool = False , __a : Optional[int] = None , **__a : Tuple , ) -> Tuple: '''simple docstring''' super().__init__( __a , split=__a , features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , ) __snake_case : Any = path_or_paths if isinstance(__a , __a ) else {self.split: path_or_paths} __snake_case : Tuple = _PACKAGED_DATASETS_MODULES['parquet'][1] __snake_case : Union[str, Any] = Parquet( cache_dir=__a , data_files=__a , features=__a , hash=__a , **__a , ) def A_ ( self : List[str] ) -> List[Any]: '''simple docstring''' # Build iterable dataset if self.streaming: __snake_case : Optional[Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __snake_case : int = None __snake_case : Any = None __snake_case : Optional[int] = None __snake_case : str = None self.builder.download_and_prepare( download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , ) __snake_case : Union[str, Any] = self.builder.as_dataset( split=self.split , verification_mode=__a , in_memory=self.keep_in_memory ) return dataset class snake_case__ : def __init__( self : List[Any] , __a : Dataset , __a : Union[PathLike, BinaryIO] , __a : Optional[int] = None , **__a : List[Any] , ) -> int: '''simple docstring''' __snake_case : List[str] = dataset __snake_case : Optional[Any] = path_or_buf __snake_case : List[Any] = batch_size or get_writer_batch_size(dataset.features ) __snake_case : Tuple = parquet_writer_kwargs def A_ ( self : List[Any] ) -> int: '''simple docstring''' __snake_case : Optional[int] = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with open(self.path_or_buf , 'wb+' ) as buffer: __snake_case : Union[str, Any] = self._write(file_obj=__a , batch_size=__a , **self.parquet_writer_kwargs ) else: __snake_case : Optional[Any] = self._write(file_obj=self.path_or_buf , batch_size=__a , **self.parquet_writer_kwargs ) return written def A_ ( self : List[str] , __a : BinaryIO , __a : int , **__a : Any ) -> int: '''simple docstring''' __snake_case : Any = 0 __snake_case : Union[str, Any] = parquet_writer_kwargs.pop('path_or_buf' , __a ) __snake_case : List[str] = self.dataset.features.arrow_schema __snake_case : Tuple = pq.ParquetWriter(__a , schema=__a , **__a ) for offset in logging.tqdm( range(0 , len(self.dataset ) , __a ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating parquet from Arrow format' , ): __snake_case : Dict = query_table( table=self.dataset._data , key=slice(__a , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , ) writer.write_table(__a ) written += batch.nbytes writer.close() return written
0
'''simple docstring''' from __future__ import annotations A__ : str = '''Muhammad Umer Farooq''' A__ : int = '''MIT''' A__ : Optional[int] = '''1.0.0''' A__ : List[Any] = '''Muhammad Umer Farooq''' A__ : Optional[Any] = '''[email protected]''' A__ : Optional[Any] = '''Alpha''' import re from html.parser import HTMLParser from urllib import parse import requests class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Union[str, Any] , __a : str ) -> None: '''simple docstring''' super().__init__() __snake_case : list[str] = [] __snake_case : Dict = domain def A_ ( self : Dict , __a : str , __a : list[tuple[str, str | None]] ) -> None: '''simple docstring''' # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: __snake_case : Optional[Any] = parse.urljoin(self.domain , __a ) self.urls.append(__a ) def a_ ( _UpperCAmelCase : str ) -> str: return ".".join(get_sub_domain_name(_UpperCAmelCase ).split('.' )[-2:] ) def a_ ( _UpperCAmelCase : str ) -> str: return parse.urlparse(_UpperCAmelCase ).netloc def a_ ( _UpperCAmelCase : str = "https://github.com" ) -> list[str]: __snake_case : List[Any] = get_domain_name(_UpperCAmelCase ) # Initialize the parser __snake_case : Tuple = Parser(_UpperCAmelCase ) try: # Open URL __snake_case : Any = requests.get(_UpperCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through __snake_case : Dict = set() for link in parser.urls: # open URL. # read = requests.get(link) try: __snake_case : List[Any] = requests.get(_UpperCAmelCase ) # Get the valid email. __snake_case : Optional[Any] = re.findall('[a-zA-Z0-9]+@' + domain ,read.text ) # If not in list then append it. for email in emails: valid_emails.add(_UpperCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(_UpperCAmelCase ) if __name__ == "__main__": A__ : Tuple = emails_from_url('''https://github.com''') print(F"""{len(emails)} emails found:""") print('''\n'''.join(sorted(emails)))
0
1
'''simple docstring''' import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def a_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Any ) -> Union[str, Any]: # load base model __snake_case : Optional[Any] = StableDiffusionPipeline.from_pretrained(_UpperCAmelCase ,torch_dtype=torch.floataa ) # load LoRA weight from .safetensors __snake_case : List[Any] = load_file(_UpperCAmelCase ) __snake_case : Optional[int] = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: __snake_case : Union[str, Any] = key.split('.' )[0].split(LORA_PREFIX_TEXT_ENCODER + '_' )[-1].split('_' ) __snake_case : List[str] = pipeline.text_encoder else: __snake_case : Union[str, Any] = key.split('.' )[0].split(LORA_PREFIX_UNET + '_' )[-1].split('_' ) __snake_case : List[str] = pipeline.unet # find the target layer __snake_case : Union[str, Any] = layer_infos.pop(0 ) while len(_UpperCAmelCase ) > -1: try: __snake_case : List[str] = curr_layer.__getattr__(_UpperCAmelCase ) if len(_UpperCAmelCase ) > 0: __snake_case : Tuple = layer_infos.pop(0 ) elif len(_UpperCAmelCase ) == 0: break except Exception: if len(_UpperCAmelCase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: __snake_case : Optional[int] = layer_infos.pop(0 ) __snake_case : Tuple = [] if "lora_down" in key: pair_keys.append(key.replace('lora_down' ,'lora_up' ) ) pair_keys.append(_UpperCAmelCase ) else: pair_keys.append(_UpperCAmelCase ) pair_keys.append(key.replace('lora_up' ,'lora_down' ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: __snake_case : Optional[Any] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) __snake_case : List[str] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_UpperCAmelCase ,_UpperCAmelCase ).unsqueeze(2 ).unsqueeze(3 ) else: __snake_case : str = state_dict[pair_keys[0]].to(torch.floataa ) __snake_case : Tuple = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_UpperCAmelCase ,_UpperCAmelCase ) # update visited list for item in pair_keys: visited.append(_UpperCAmelCase ) return pipeline if __name__ == "__main__": A__ : str = argparse.ArgumentParser() parser.add_argument( '''--base_model_path''', default=None, type=str, required=True, help='''Path to the base model in diffusers format.''' ) parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--lora_prefix_unet''', default='''lora_unet''', type=str, help='''The prefix of UNet weight in safetensors''' ) parser.add_argument( '''--lora_prefix_text_encoder''', default='''lora_te''', type=str, help='''The prefix of text encoder weight in safetensors''', ) parser.add_argument('''--alpha''', default=0.75, type=float, help='''The merging ratio in W = W0 + alpha * deltaW''') parser.add_argument( '''--to_safetensors''', action='''store_true''', help='''Whether to store pipeline in safetensors format or not.''' ) parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''') A__ : str = parser.parse_args() A__ : Optional[Any] = args.base_model_path A__ : int = args.checkpoint_path A__ : str = args.dump_path A__ : Tuple = args.lora_prefix_unet A__ : Optional[Any] = args.lora_prefix_text_encoder A__ : Optional[Any] = args.alpha A__ : int = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) A__ : int = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
0
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) A__ : Dict = logging.getLogger() def a_ ( ) -> Tuple: __snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument('-f' ) __snake_case : Any = parser.parse_args() return args.f def a_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]: __snake_case : Tuple = {} __snake_case : Union[str, Any] = os.path.join(_UpperCAmelCase ,'all_results.json' ) if os.path.exists(_UpperCAmelCase ): with open(_UpperCAmelCase ,'r' ) as f: __snake_case : List[str] = json.load(_UpperCAmelCase ) else: raise ValueError(f'''can\'t find {path}''' ) return results def a_ ( ) -> Union[str, Any]: __snake_case : Union[str, Any] = torch.cuda.is_available() and torch_device == 'cuda' return is_using_cuda and is_apex_available() A__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @classmethod def A_ ( cls : Any ) -> List[str]: '''simple docstring''' # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __snake_case : Optional[int] = tempfile.mkdtemp() __snake_case : Dict = os.path.join(cls.tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) __snake_case : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def A_ ( cls : List[str] ) -> List[str]: '''simple docstring''' shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : List[Any] = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'glue_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertLess(result['perplexity'] , 100 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'clm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : int = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertLess(result['perplexity'] , 42 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'mlm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __snake_case : Any = 7 if get_gpu_count() > 1 else 2 __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertLess(result['train_loss'] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'ner_no_trainer' ) ) ) @unittest.skip(reason='Fix me @muellerzr' ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : Tuple = f''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['eval_f1'] , 28 ) self.assertGreaterEqual(result['eval_exact'] , 28 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'qa_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : str = self.get_auto_remove_tmp_dir() __snake_case : Any = f''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__a , 'swag_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : int = get_results(__a ) self.assertGreaterEqual(result['eval_rouge1'] , 10 ) self.assertGreaterEqual(result['eval_rouge2'] , 2 ) self.assertGreaterEqual(result['eval_rougeL'] , 7 ) self.assertGreaterEqual(result['eval_rougeLsum'] , 7 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_bleu'] , 30 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) ) @slow def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) __snake_case : List[str] = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Dict = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : Optional[int] = get_results(__a ) # The base model scores a 25% self.assertGreaterEqual(result['eval_accuracy'] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
0
1
'''simple docstring''' from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run A__ : int = True except (ImportError, AttributeError): A__ : Union[str, Any] = object def a_ ( *_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : Optional[int] ) -> Union[str, Any]: pass A__ : Optional[Any] = False A__ : int = logging.get_logger('''transformers-cli/serving''') def a_ ( _UpperCAmelCase : Namespace ) -> Optional[int]: __snake_case : Tuple = pipeline( task=args.task ,model=args.model if args.model else None ,config=args.config ,tokenizer=args.tokenizer ,device=args.device ,) return ServeCommand(_UpperCAmelCase ,args.host ,args.port ,args.workers ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = 42 class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = 42 A__ = 42 class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = 42 class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = 42 class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @staticmethod def A_ ( __a : ArgumentParser ) -> Optional[Any]: '''simple docstring''' __snake_case : Tuple = parser.add_parser( 'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' ) serve_parser.add_argument( '--task' , type=__a , choices=get_supported_tasks() , help='The task to run the pipeline on' , ) serve_parser.add_argument('--host' , type=__a , default='localhost' , help='Interface the server will listen on.' ) serve_parser.add_argument('--port' , type=__a , default=8888 , help='Port the serving will listen to.' ) serve_parser.add_argument('--workers' , type=__a , default=1 , help='Number of http workers' ) serve_parser.add_argument('--model' , type=__a , help='Model\'s name or path to stored model.' ) serve_parser.add_argument('--config' , type=__a , help='Model\'s config name or path to stored model.' ) serve_parser.add_argument('--tokenizer' , type=__a , help='Tokenizer name to use.' ) serve_parser.add_argument( '--device' , type=__a , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , ) serve_parser.set_defaults(func=__a ) def __init__( self : Union[str, Any] , __a : Pipeline , __a : str , __a : int , __a : int ) -> Any: '''simple docstring''' __snake_case : List[str] = pipeline __snake_case : List[str] = host __snake_case : Optional[int] = port __snake_case : List[Any] = workers if not _serve_dependencies_installed: raise RuntimeError( 'Using serve command requires FastAPI and uvicorn. ' 'Please install transformers with [serving]: pip install "transformers[serving]".' 'Or install FastAPI and uvicorn separately.' ) else: logger.info(f'''Serving model over {host}:{port}''' ) __snake_case : List[Any] = FastAPI( routes=[ APIRoute( '/' , self.model_info , response_model=__a , response_class=__a , methods=['GET'] , ), APIRoute( '/tokenize' , self.tokenize , response_model=__a , response_class=__a , methods=['POST'] , ), APIRoute( '/detokenize' , self.detokenize , response_model=__a , response_class=__a , methods=['POST'] , ), APIRoute( '/forward' , self.forward , response_model=__a , response_class=__a , methods=['POST'] , ), ] , timeout=600 , ) def A_ ( self : List[str] ) -> int: '''simple docstring''' run(self._app , host=self.host , port=self.port , workers=self.workers ) def A_ ( self : Optional[Any] ) -> str: '''simple docstring''' return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def A_ ( self : int , __a : str = Body(__a , embed=__a ) , __a : bool = Body(__a , embed=__a ) ) -> Optional[int]: '''simple docstring''' try: __snake_case : str = self._pipeline.tokenizer.tokenize(__a ) if return_ids: __snake_case : Optional[Any] = self._pipeline.tokenizer.convert_tokens_to_ids(__a ) return ServeTokenizeResult(tokens=__a , tokens_ids=__a ) else: return ServeTokenizeResult(tokens=__a ) except Exception as e: raise HTTPException(status_code=500 , detail={'model': '', 'error': str(__a )} ) def A_ ( self : Optional[int] , __a : List[int] = Body(__a , embed=__a ) , __a : bool = Body(__a , embed=__a ) , __a : bool = Body(__a , embed=__a ) , ) -> List[str]: '''simple docstring''' try: __snake_case : str = self._pipeline.tokenizer.decode(__a , __a , __a ) return ServeDeTokenizeResult(model='' , text=__a ) except Exception as e: raise HTTPException(status_code=500 , detail={'model': '', 'error': str(__a )} ) async def A_ ( self : List[Any] , __a : Union[str, Any]=Body(__a , embed=__a ) ) -> Any: '''simple docstring''' # Check we don't have empty string if len(__a ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model __snake_case : Optional[Any] = self._pipeline(__a ) return ServeForwardResult(output=__a ) except Exception as e: raise HTTPException(500 , {'error': str(__a )} )
0
'''simple docstring''' import math def a_ ( _UpperCAmelCase : int ) -> list: __snake_case : Optional[Any] = [True] * n __snake_case : Optional[int] = False __snake_case : Dict = False __snake_case : List[Any] = True for i in range(3 ,int(n**0.5 + 1 ) ,2 ): __snake_case : Optional[int] = i * 2 while index < n: __snake_case : Union[str, Any] = False __snake_case : int = index + i __snake_case : Dict = [2] for i in range(3 ,_UpperCAmelCase ,2 ): if is_prime[i]: primes.append(_UpperCAmelCase ) return primes def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int: __snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00 __snake_case : Tuple = prime_sieve(_UpperCAmelCase ) __snake_case : List[Any] = 0 __snake_case : List[Any] = 0 __snake_case : Optional[int] = primes[prime_index] while (last_prime**2) <= limit: __snake_case : Optional[int] = primes[prime_index + 1] __snake_case : Union[str, Any] = last_prime**2 __snake_case : Dict = next_prime**2 # Get numbers divisible by lps(current) __snake_case : Optional[Any] = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) __snake_case : Optional[Any] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps __snake_case : List[str] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair __snake_case : Dict = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
0
1
'''simple docstring''' # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position A__ : List[str] = '''2.13.1''' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('''3.7'''): raise ImportWarning( '''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.''' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( '''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n''' '''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.''' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip A__ : Optional[int] = concatenate_datasets A__ : List[Any] = DownloadConfig A__ : str = DownloadManager A__ : Optional[int] = DownloadMode A__ : Union[str, Any] = DownloadConfig A__ : List[str] = DownloadMode A__ : List[Any] = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
0
'''simple docstring''' def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float: return price * (1 + tax_rate) if __name__ == "__main__": print(F"""{price_plus_tax(1_0_0, 0.25) = }""") print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
0
1
'''simple docstring''' import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging A__ : List[str] = logging.get_logger(__name__) def a_ ( _UpperCAmelCase : Any ) -> List[str]: __snake_case : Any = r'\w+[.]\d+' __snake_case : List[str] = re.findall(_UpperCAmelCase ,_UpperCAmelCase ) for pat in pats: __snake_case : Union[str, Any] = key.replace(_UpperCAmelCase ,'_'.join(pat.split('.' ) ) ) return key def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Dict ,_UpperCAmelCase : List[str] ) -> Optional[Any]: __snake_case : Tuple = pt_tuple_key[:-1] + ('scale',) if ( any('norm' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): __snake_case : List[str] = pt_tuple_key[:-1] + ('scale',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: __snake_case : str = pt_tuple_key[:-1] + ('scale',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: __snake_case : List[str] = pt_tuple_key[:-1] + ('embedding',) return renamed_pt_tuple_key, pt_tensor # conv layer __snake_case : Optional[int] = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: __snake_case : str = pt_tensor.transpose(2 ,3 ,1 ,0 ) return renamed_pt_tuple_key, pt_tensor # linear layer __snake_case : str = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight": __snake_case : Optional[Any] = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight __snake_case : Union[str, Any] = pt_tuple_key[:-1] + ('weight',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias __snake_case : Optional[Any] = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def a_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[str]=42 ) -> Union[str, Any]: # Step 1: Convert pytorch tensor to numpy __snake_case : str = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params __snake_case : str = flax_model.init_weights(PRNGKey(_UpperCAmelCase ) ) __snake_case : Optional[Any] = flatten_dict(_UpperCAmelCase ) __snake_case : List[Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): __snake_case : List[str] = rename_key(_UpperCAmelCase ) __snake_case : int = tuple(renamed_pt_key.split('.' ) ) # Correctly rename weight parameters __snake_case , __snake_case : List[Any] = rename_key_and_reshape_tensor(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # also add unexpected weight so that warning is thrown __snake_case : List[Any] = jnp.asarray(_UpperCAmelCase ) return unflatten_dict(_UpperCAmelCase )
0
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : List[Any] ) -> int: '''simple docstring''' __snake_case : Optional[int] = SMALL_MODEL_IDENTIFIER __snake_case : str = 'pt' __snake_case : Union[str, Any] = 'tf' def A_ ( self : Dict , __a : Tuple ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(__a ) def A_ ( self : Any , __a : Optional[Any] ) -> Dict: '''simple docstring''' __snake_case : Union[str, Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__a ) model_tf.save_pretrained(__a ) def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Tuple = 'mock_framework' # Framework provided - return whatever the user provides __snake_case : int = FeaturesManager.determine_framework(self.test_model , __a ) self.assertEqual(__a , __a ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__a ) __snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a ) self.assertEqual(__a , __a ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__a ) __snake_case : Tuple = FeaturesManager.determine_framework(__a , __a ) self.assertEqual(__a , __a ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__a ) __snake_case : Tuple = FeaturesManager.determine_framework(__a ) self.assertEqual(__a , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__a ) __snake_case : Union[str, Any] = FeaturesManager.determine_framework(__a ) self.assertEqual(__a , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(__a ): __snake_case : Optional[int] = FeaturesManager.determine_framework(__a ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Union[str, Any] = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ): __snake_case : int = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_pt ) # PyTorch not in environment -> use TensorFlow __snake_case : Tuple = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_torch_available' , __a ): __snake_case : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_tf ) # Both in environment -> use PyTorch __snake_case : Optional[Any] = MagicMock(return_value=__a ) __snake_case : Tuple = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ), patch( 'transformers.onnx.features.is_torch_available' , __a ): __snake_case : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_pt ) # Both not in environment -> raise error __snake_case : str = MagicMock(return_value=__a ) __snake_case : List[Any] = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ), patch( 'transformers.onnx.features.is_torch_available' , __a ): with self.assertRaises(__a ): __snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
0
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A__ : str = { '''configuration_xlm_roberta''': [ '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaConfig''', '''XLMRobertaOnnxConfig''', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = ['''XLMRobertaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = ['''XLMRobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Union[str, Any] = [ '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaForCausalLM''', '''XLMRobertaForMaskedLM''', '''XLMRobertaForMultipleChoice''', '''XLMRobertaForQuestionAnswering''', '''XLMRobertaForSequenceClassification''', '''XLMRobertaForTokenClassification''', '''XLMRobertaModel''', '''XLMRobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Tuple = [ '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMRobertaForCausalLM''', '''TFXLMRobertaForMaskedLM''', '''TFXLMRobertaForMultipleChoice''', '''TFXLMRobertaForQuestionAnswering''', '''TFXLMRobertaForSequenceClassification''', '''TFXLMRobertaForTokenClassification''', '''TFXLMRobertaModel''', '''TFXLMRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = [ '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxXLMRobertaForMaskedLM''', '''FlaxXLMRobertaForCausalLM''', '''FlaxXLMRobertaForMultipleChoice''', '''FlaxXLMRobertaForQuestionAnswering''', '''FlaxXLMRobertaForSequenceClassification''', '''FlaxXLMRobertaForTokenClassification''', '''FlaxXLMRobertaModel''', '''FlaxXLMRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys A__ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ProphetNetTokenizer A__ = False def A_ ( self : Optional[int] ) -> Dict: '''simple docstring''' super().setUp() __snake_case : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : int , __a : Union[str, Any] ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = 'UNwant\u00E9d,running' __snake_case : List[str] = 'unwanted, running' return input_text, output_text def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Dict = self.tokenizer_class(self.vocab_file ) __snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def A_ ( self : int ) -> Any: '''simple docstring''' __snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Any ) -> List[str]: '''simple docstring''' __snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Optional[int] ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def A_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' __snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __snake_case : List[Any] = {} for i, token in enumerate(__a ): __snake_case : List[str] = i __snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def A_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' __snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] __snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' ) self.assertIsInstance(__a , __a ) __snake_case : int = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__a , __a ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def A_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a ) __snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a ) __snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a ) __snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
0
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A__ : Optional[int] = { '''configuration_pix2struct''': [ '''PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Pix2StructConfig''', '''Pix2StructTextConfig''', '''Pix2StructVisionConfig''', ], '''processing_pix2struct''': ['''Pix2StructProcessor'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = ['''Pix2StructImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = [ '''PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Pix2StructPreTrainedModel''', '''Pix2StructForConditionalGeneration''', '''Pix2StructVisionModel''', '''Pix2StructTextModel''', ] if TYPE_CHECKING: from .configuration_pixastruct import ( PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP, PixaStructConfig, PixaStructTextConfig, PixaStructVisionConfig, ) from .processing_pixastruct import PixaStructProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_pixastruct import PixaStructImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_pixastruct import ( PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST, PixaStructForConditionalGeneration, PixaStructPreTrainedModel, PixaStructTextModel, PixaStructVisionModel, ) else: import sys A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : Optional[Any] = { '''configuration_nllb_moe''': [ '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NllbMoeConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NllbMoeForConditionalGeneration''', '''NllbMoeModel''', '''NllbMoePreTrainedModel''', '''NllbMoeTop2Router''', '''NllbMoeSparseMLP''', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
1
'''simple docstring''' import csv import tweepy # Twitter API credentials A__ : Optional[Any] = '''''' A__ : Optional[int] = '''''' A__ : List[str] = '''''' A__ : Union[str, Any] = '''''' def a_ ( _UpperCAmelCase : str ) -> None: # authorize twitter, initialize tweepy __snake_case : Dict = tweepy.OAuthHandler(_UpperCAmelCase ,_UpperCAmelCase ) auth.set_access_token(_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : List[Any] = tweepy.API(_UpperCAmelCase ) # initialize a list to hold all the tweepy Tweets __snake_case : Optional[int] = [] # make initial request for most recent tweets (200 is the maximum allowed count) __snake_case : Tuple = api.user_timeline(screen_name=_UpperCAmelCase ,count=2_00 ) # save most recent tweets alltweets.extend(_UpperCAmelCase ) # save the id of the oldest tweet less one __snake_case : List[Any] = alltweets[-1].id - 1 # keep grabbing tweets until there are no tweets left to grab while len(_UpperCAmelCase ) > 0: print(f'''getting tweets before {oldest}''' ) # all subsequent requests use the max_id param to prevent duplicates __snake_case : str = api.user_timeline( screen_name=_UpperCAmelCase ,count=2_00 ,max_id=_UpperCAmelCase ) # save most recent tweets alltweets.extend(_UpperCAmelCase ) # update the id of the oldest tweet less one __snake_case : Union[str, Any] = alltweets[-1].id - 1 print(f'''...{len(_UpperCAmelCase )} tweets downloaded so far''' ) # transform the tweepy tweets into a 2D array that will populate the csv __snake_case : Any = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets] # write the csv with open(f'''new_{screen_name}_tweets.csv''' ,'w' ) as f: __snake_case : int = csv.writer(_UpperCAmelCase ) writer.writerow(['id', 'created_at', 'text'] ) writer.writerows(_UpperCAmelCase ) if __name__ == "__main__": # pass in the username of the account you want to download get_all_tweets('''FirePing32''')
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('The given input must be positive' ) # get the generated string sequence __snake_case : Optional[Any] = gray_code_sequence_string(_UpperCAmelCase ) # # convert them to integers for i in range(len(_UpperCAmelCase ) ): __snake_case : Optional[Any] = int(sequence[i] ,2 ) return sequence def a_ ( _UpperCAmelCase : int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __snake_case : Dict = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __snake_case : Dict = gray_code_sequence_string(bit_count - 1 ) __snake_case : Any = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __snake_case : str = '0' + smaller_sequence[i] sequence.append(_UpperCAmelCase ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __snake_case : Any = '1' + smaller_sequence[i] sequence.append(_UpperCAmelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
0
1
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) A__ : Optional[Any] = { '''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''], '''processing_trocr''': ['''TrOCRProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = [ '''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TrOCRForCausalLM''', '''TrOCRPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys A__ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class snake_case__ ( unittest.TestCase ): def A_ ( self : int ) -> List[Any]: '''simple docstring''' __snake_case : Any = tempfile.mkdtemp() # fmt: off __snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) __snake_case : List[str] = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } __snake_case : Optional[Any] = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__a , __a ) def A_ ( self : Optional[int] , **__a : Dict ) -> int: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : int , **__a : Dict ) -> Tuple: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __snake_case : List[str] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : Dict = self.get_image_processor() __snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) processor.save_pretrained(self.tmpdirname ) __snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : Optional[Any] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 ) __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = self.prepare_image_inputs() __snake_case : List[str] = image_processor(__a , return_tensors='np' ) __snake_case : List[str] = processor(images=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Optional[int] = 'lower newer' __snake_case : Dict = processor(text=__a ) __snake_case : List[Any] = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : List[Any] = 'lower newer' __snake_case : Optional[Any] = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(__a ): processor() def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Union[str, Any] = self.get_image_processor() __snake_case : Any = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __snake_case : int = processor.batch_decode(__a ) __snake_case : Optional[Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def A_ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = self.get_image_processor() __snake_case : Dict = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Union[str, Any] = 'lower newer' __snake_case : Tuple = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
0
1
'''simple docstring''' import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def a_ ( _UpperCAmelCase : int ) -> List[str]: __snake_case : int = os.path.join(args.tf_model_dir ,'parameters.json' ) __snake_case : Union[str, Any] = json.loads(open(_UpperCAmelCase ).read() ) if not params: raise ValueError( f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' ) if not args.output.endswith('.pt' ): __snake_case : int = args.output + '.pt' __snake_case : str = OrderedDict() with tf.device('/CPU:0' ): __snake_case : int = tf.train.load_checkpoint(args.tf_model_dir ) __snake_case : Any = reader.get_variable_to_shape_map() for key_name in shapes.keys(): __snake_case : Any = reader.get_tensor(_UpperCAmelCase ).astype(np.floataa ) if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ): continue if key_name.startswith('pasts/' ): if key_name.startswith('pasts/mlp' ): __snake_case : List[Any] = int(key_name[9] ) elif key_name.startswith('pasts/out' ): __snake_case : str = 8 __snake_case : Optional[int] = 'model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time __snake_case : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __snake_case : str = torch.tensor(_UpperCAmelCase ) elif key_name.startswith('model/moe' ): __snake_case : str = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/switch_gating/kernel' ): __snake_case : Tuple = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player __snake_case : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __snake_case : int = torch.tensor(_UpperCAmelCase ) elif key_name.endswith('/softmlp/kernel' ): __snake_case : Union[str, Any] = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player __snake_case : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __snake_case : Optional[int] = torch.tensor(_UpperCAmelCase ) elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ): __snake_case : Union[str, Any] = key_name[-9:-7] for i in range(16 ): __snake_case : str = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer) __snake_case : Optional[Any] = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided __snake_case : Dict = torch.tensor(_UpperCAmelCase ) elif key_name.startswith('model/mlp' ): __snake_case : Optional[Any] = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/p1/kernel' ): __snake_case : Dict = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player __snake_case : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __snake_case : Any = torch.tensor(_UpperCAmelCase ) elif key_name.endswith('/p1/bias' ): __snake_case : Union[str, Any] = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player __snake_case : Optional[Any] = vnp.copy() # same because it is one dimensional __snake_case : List[str] = torch.tensor(_UpperCAmelCase ) elif key_name.endswith('/p2/kernel' ): __snake_case : Dict = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player __snake_case : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __snake_case : List[Any] = torch.tensor(_UpperCAmelCase ) elif key_name.endswith('/p2/bias' ): __snake_case : str = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player __snake_case : Dict = vnp.copy() # same because it is one dimensional __snake_case : str = torch.tensor(_UpperCAmelCase ) elif key_name.startswith('model/ln' ): __snake_case : Optional[Any] = int(key_name[8:].split('/' )[0] ) if key_name.endswith('/b' ): __snake_case : Optional[int] = 'model.blocks.%d.feed_forward.norm.bias' % player __snake_case : Any = vnp.copy() # same because it is one dimensional __snake_case : Dict = torch.tensor(_UpperCAmelCase ) elif key_name.endswith('/g' ): __snake_case : Dict = 'model.blocks.%d.feed_forward.norm.weight' % player __snake_case : Optional[int] = vnp.copy() # same because it is one dimensional __snake_case : Tuple = torch.tensor(_UpperCAmelCase ) elif key_name.startswith('model/att' ): __snake_case : Union[str, Any] = int(key_name[9:].split('/' )[0] ) if key_name.endswith('/qkv/kernel' ): __snake_case : int = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum __snake_case : Dict = state[:, 0, :, :] __snake_case : Optional[Any] = state[:, 1, :, :] __snake_case : Tuple = state[:, 2, :, :] __snake_case : str = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __snake_case : Optional[int] = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __snake_case : Dict = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix __snake_case : List[str] = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player __snake_case : int = torch.tensor(_UpperCAmelCase ) __snake_case : Optional[int] = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player __snake_case : Optional[Any] = torch.tensor(_UpperCAmelCase ) __snake_case : List[str] = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player __snake_case : Union[str, Any] = torch.tensor(_UpperCAmelCase ) elif key_name.endswith('/o/kernel' ): __snake_case : Optional[Any] = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player __snake_case : List[Any] = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix __snake_case : Optional[int] = torch.tensor(_UpperCAmelCase ) elif key_name.startswith('model/an' ): __snake_case : Union[str, Any] = int(key_name[8:].split('/' )[0] ) if key_name.endswith('/b' ): __snake_case : Tuple = 'model.blocks.%d.self_attn.norm.bias' % player __snake_case : List[str] = vnp.copy() # same because it is one dimensional __snake_case : List[Any] = torch.tensor(_UpperCAmelCase ) elif key_name.endswith('/g' ): __snake_case : List[Any] = 'model.blocks.%d.self_attn.norm.weight' % player __snake_case : Optional[int] = vnp.copy() # same because it is one dimensional __snake_case : List[str] = torch.tensor(_UpperCAmelCase ) elif ( key_name.startswith('model/wte' ) or key_name.startswith('model/wpe' ) or key_name.startswith('model/ete' ) ): __snake_case : Dict = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[ key_name[-3:] ] __snake_case : List[Any] = 'model.%s.weight' % nlayer __snake_case : Any = vnp.copy() # same in embedded __snake_case : Union[str, Any] = torch.tensor(_UpperCAmelCase ) if key_name.startswith('model/wte' ): __snake_case : Union[str, Any] = 'lm_head.weight' __snake_case : List[Any] = vnp.copy() # same in embedded __snake_case : Tuple = torch.tensor(_UpperCAmelCase ) elif key_name.startswith('model/wob' ): __snake_case : List[str] = 'final_logits_bias' __snake_case : Optional[Any] = vnp.copy() # same in embedded __snake_case : Any = state.reshape((1, -1) ) __snake_case : Optional[int] = torch.tensor(_UpperCAmelCase ) elif key_name == "model/dense/kernel": __snake_case : Optional[int] = 'model.last_project.weight' __snake_case : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix __snake_case : int = torch.tensor(_UpperCAmelCase ) elif key_name == "model/dense_1/bias": __snake_case : Tuple = 'model.last_project.bias' __snake_case : Dict = vnp.copy() # same because it is one dimensional __snake_case : Dict = torch.tensor(_UpperCAmelCase ) torch.save(_UpperCAmelCase ,args.output ) if __name__ == "__main__": A__ : int = argparse.ArgumentParser( description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''') parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''') A__ : Dict = parser.parse_args() convert_tf_gptsan_to_pt(args)
0
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def a_ ( _UpperCAmelCase : List[Any] ) -> Tuple: __snake_case : str = [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ) -> List[str]: __snake_case : Tuple = [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def a_ ( _UpperCAmelCase : Union[str, Any] ) -> Dict: __snake_case : Union[str, Any] = [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def a_ ( ) -> Optional[Any]: __snake_case : Any = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ) -> Tuple: __snake_case : List[str] = 'imagenet-1k-id2label.json' __snake_case : Dict = 10_00 __snake_case : Union[str, Any] = 'huggingface/label-files' __snake_case : str = num_labels __snake_case : str = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ) ,'r' ) ) __snake_case : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __snake_case : Optional[Any] = idalabel __snake_case : str = {v: k for k, v in idalabel.items()} __snake_case : Dict = CvtConfig(num_labels=_UpperCAmelCase ,idalabel=_UpperCAmelCase ,labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' ,1 )[-1][4:6] == "13": __snake_case : Tuple = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' ,1 )[-1][4:6] == "21": __snake_case : str = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __snake_case : Dict = [2, 2, 20] __snake_case : Any = [3, 12, 16] __snake_case : Tuple = [1_92, 7_68, 10_24] __snake_case : str = CvtForImageClassification(_UpperCAmelCase ) __snake_case : List[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) __snake_case : int = image_size __snake_case : int = torch.load(_UpperCAmelCase ,map_location=torch.device('cpu' ) ) __snake_case : List[Any] = OrderedDict() __snake_case : Union[str, Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __snake_case : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase ) __snake_case : Tuple = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __snake_case : Optional[int] = list_of_state_dict + attention(_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : str = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __snake_case : List[str] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": A__ : Dict = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=3_8_4, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) A__ : Tuple = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
0
1
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : List[Any] = logging.get_logger(__name__) A__ : Dict = { '''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''', # See all WavLM models at https://huggingface.co/models?filter=wavlm } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''wavlm''' def __init__( self : Optional[int] , __a : Any=32 , __a : Optional[int]=768 , __a : Optional[int]=12 , __a : Optional[int]=12 , __a : Dict=3072 , __a : List[str]="gelu" , __a : Union[str, Any]=0.1 , __a : int=0.1 , __a : int=0.1 , __a : List[Any]=0.0 , __a : str=0.1 , __a : Union[str, Any]=0.1 , __a : Optional[int]=0.0_2 , __a : List[str]=1e-5 , __a : int="group" , __a : Tuple="gelu" , __a : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , __a : Any=(5, 2, 2, 2, 2, 2, 2) , __a : Optional[int]=(10, 3, 3, 3, 3, 2, 2) , __a : Union[str, Any]=False , __a : Optional[Any]=128 , __a : List[str]=16 , __a : Dict=320 , __a : Tuple=800 , __a : List[str]=False , __a : Optional[Any]=True , __a : Dict=0.0_5 , __a : Dict=10 , __a : List[Any]=2 , __a : Tuple=0.0 , __a : Dict=10 , __a : List[Any]=320 , __a : Optional[int]=2 , __a : Union[str, Any]=0.1 , __a : Optional[Any]=100 , __a : Union[str, Any]=256 , __a : str=256 , __a : Union[str, Any]=0.1 , __a : str="mean" , __a : str=False , __a : str=False , __a : Optional[int]=256 , __a : Union[str, Any]=(512, 512, 512, 512, 1500) , __a : int=(5, 3, 3, 1, 1) , __a : Optional[Any]=(1, 2, 3, 1, 1) , __a : Union[str, Any]=512 , __a : Optional[Any]=80 , __a : Tuple=0 , __a : Optional[Any]=1 , __a : List[Any]=2 , __a : List[Any]=False , __a : Any=3 , __a : Any=2 , __a : Optional[Any]=3 , __a : Optional[Any]=None , **__a : Union[str, Any] , ) -> List[Any]: '''simple docstring''' super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a ) __snake_case : Tuple = hidden_size __snake_case : Optional[Any] = feat_extract_norm __snake_case : List[str] = feat_extract_activation __snake_case : str = list(__a ) __snake_case : List[str] = list(__a ) __snake_case : List[str] = list(__a ) __snake_case : Any = conv_bias __snake_case : Tuple = num_buckets __snake_case : List[Any] = max_bucket_distance __snake_case : List[Any] = num_conv_pos_embeddings __snake_case : str = num_conv_pos_embedding_groups __snake_case : List[str] = len(self.conv_dim ) __snake_case : List[str] = num_hidden_layers __snake_case : int = intermediate_size __snake_case : Any = hidden_act __snake_case : List[Any] = num_attention_heads __snake_case : List[str] = hidden_dropout __snake_case : Optional[int] = attention_dropout __snake_case : Union[str, Any] = activation_dropout __snake_case : List[str] = feat_proj_dropout __snake_case : List[str] = final_dropout __snake_case : List[str] = layerdrop __snake_case : Any = layer_norm_eps __snake_case : Any = initializer_range __snake_case : Any = num_ctc_classes __snake_case : Optional[Any] = vocab_size __snake_case : str = do_stable_layer_norm __snake_case : Any = use_weighted_layer_sum __snake_case : Dict = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==' ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __snake_case : str = apply_spec_augment __snake_case : str = mask_time_prob __snake_case : Union[str, Any] = mask_time_length __snake_case : List[Any] = mask_time_min_masks __snake_case : List[Any] = mask_feature_prob __snake_case : List[str] = mask_feature_length # parameters for pretraining with codevector quantized representations __snake_case : int = num_codevectors_per_group __snake_case : int = num_codevector_groups __snake_case : List[str] = contrastive_logits_temperature __snake_case : List[str] = num_negatives __snake_case : int = codevector_dim __snake_case : Optional[Any] = proj_codevector_dim __snake_case : str = diversity_loss_weight # ctc loss __snake_case : Any = ctc_loss_reduction __snake_case : Optional[int] = ctc_zero_infinity # adapter __snake_case : List[str] = add_adapter __snake_case : List[str] = adapter_kernel_size __snake_case : List[Any] = adapter_stride __snake_case : Tuple = num_adapter_layers __snake_case : Dict = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. __snake_case : Tuple = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. __snake_case : Optional[int] = list(__a ) __snake_case : Union[str, Any] = list(__a ) __snake_case : Optional[int] = list(__a ) __snake_case : Optional[Any] = xvector_output_dim @property def A_ ( self : List[Any] ) -> str: '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1 )
0
'''simple docstring''' from __future__ import annotations A__ : List[Any] = list[list[int]] # assigning initial values to the grid A__ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution A__ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( _UpperCAmelCase : Matrix ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( _UpperCAmelCase : Matrix ) -> Matrix | None: if location := find_empty_location(_UpperCAmelCase ): __snake_case , __snake_case : Optional[int] = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 ,10 ): if is_safe(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ): __snake_case : Union[str, Any] = digit if sudoku(_UpperCAmelCase ) is not None: return grid __snake_case : Optional[Any] = 0 return None def a_ ( _UpperCAmelCase : Matrix ) -> None: for row in grid: for cell in row: print(_UpperCAmelCase ,end=' ' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 2_0) print_solution(example_grid) print('''\nExample grid solution:''') A__ : List[str] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
0
1
'''simple docstring''' import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: A__ : Union[str, Any] = None A__ : Optional[int] = logging.get_logger(__name__) A__ : Dict = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} A__ : Tuple = { '''vocab_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_file''': { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''', }, } A__ : List[Any] = { '''camembert-base''': 5_1_2, } A__ : Optional[Any] = '''▁''' class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ['''input_ids''', '''attention_mask'''] A__ = CamembertTokenizer def __init__( self : Any , __a : Dict=None , __a : Optional[Any]=None , __a : int="<s>" , __a : List[Any]="</s>" , __a : Union[str, Any]="</s>" , __a : Optional[Any]="<s>" , __a : Tuple="<unk>" , __a : int="<pad>" , __a : List[Any]="<mask>" , __a : int=["<s>NOTUSED", "</s>NOTUSED"] , **__a : List[str] , ) -> Tuple: '''simple docstring''' # Mask token behave like a normal word, i.e. include the space before it __snake_case : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token super().__init__( __a , tokenizer_file=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , additional_special_tokens=__a , **__a , ) __snake_case : Optional[Any] = vocab_file __snake_case : str = False if not self.vocab_file else True def A_ ( self : str , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __snake_case : List[str] = [self.cls_token_id] __snake_case : Dict = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def A_ ( self : Tuple , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' __snake_case : Union[str, Any] = [self.sep_token_id] __snake_case : int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def A_ ( self : Dict , __a : str , __a : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(__a ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __snake_case : str = os.path.join( __a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ): copyfile(self.vocab_file , __a ) return (out_vocab_file,)
0
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = KandinskyVaaPriorPipeline A__ = ['''prompt'''] A__ = ['''prompt''', '''negative_prompt'''] A__ = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Dict ) -> List[str]: '''simple docstring''' return 32 @property def A_ ( self : Any ) -> str: '''simple docstring''' return 32 @property def A_ ( self : str ) -> Optional[int]: '''simple docstring''' return self.time_input_dim @property def A_ ( self : str ) -> int: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return 100 @property def A_ ( self : Tuple ) -> List[str]: '''simple docstring''' __snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__a ) @property def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Any = { 'num_attention_heads': 2, 'attention_head_dim': 12, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } __snake_case : List[Any] = PriorTransformer(**__a ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __snake_case : Any = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __snake_case : Optional[Any] = CLIPVisionModelWithProjection(__a ) return model @property def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : Dict = CLIPImageProcessor( crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = self.dummy_prior __snake_case : List[str] = self.dummy_image_encoder __snake_case : str = self.dummy_text_encoder __snake_case : List[str] = self.dummy_tokenizer __snake_case : List[str] = self.dummy_image_processor __snake_case : Any = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=1_0.0 , ) __snake_case : str = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def A_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple=0 ) -> Any: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : List[str] = torch.manual_seed(__a ) else: __snake_case : List[str] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : List[Any] = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def A_ ( self : str ) -> Dict: '''simple docstring''' __snake_case : str = 'cpu' __snake_case : List[str] = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**__a ) __snake_case : Optional[Any] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : List[str] = output.image_embeds __snake_case : str = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] __snake_case : Union[str, Any] = image[0, -10:] __snake_case : Any = image_from_tuple[0, -10:] assert image.shape == (1, 32) __snake_case : List[Any] = np.array( [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def A_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = torch_device == 'cpu' __snake_case : Dict = True __snake_case : Union[str, Any] = False self._test_inference_batch_single_identical( test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , ) @skip_mps def A_ ( self : str ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = torch_device == 'cpu' __snake_case : Optional[Any] = False self._test_attention_slicing_forward_pass( test_max_difference=__a , test_mean_pixel_difference=__a , )
0
1
'''simple docstring''' from ..utils import DummyObject, requires_backends class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Optional[Any] , *__a : int , **__a : Any ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Union[str, Any] , *__a : Any , **__a : Any ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Optional[Any] , *__a : str , **__a : Union[str, Any] ) -> int: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : str , *__a : Tuple , **__a : Tuple ) -> str: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : str , *__a : Optional[Any] , **__a : int ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : int , **__a : int ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Any , *__a : List[str] , **__a : int ) -> Dict: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : str , *__a : int , **__a : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : Dict , **__a : str ) -> Any: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : str , *__a : List[Any] , **__a : Dict ) -> int: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Any , *__a : str , **__a : Tuple ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : Dict , **__a : str ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : int , *__a : List[str] , **__a : Union[str, Any] ) -> Dict: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Any , *__a : Tuple , **__a : Tuple ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : str , *__a : int , **__a : Dict ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Any , *__a : Dict , **__a : List[Any] ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[int] , *__a : str , **__a : List[str] ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Union[str, Any] , *__a : Optional[Any] , **__a : Any ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[Any] , *__a : int , **__a : Optional[int] ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : Any , **__a : Dict ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Any , *__a : Optional[Any] , **__a : int ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Tuple , *__a : List[Any] , **__a : int ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : str , **__a : Optional[Any] ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : Optional[int] , **__a : Tuple ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : str , *__a : Union[str, Any] , **__a : int ) -> Any: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : str , *__a : int , **__a : Any ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : str , *__a : Dict , **__a : Any ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Optional[int] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : Dict , **__a : Optional[int] ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Dict , *__a : List[Any] , **__a : Tuple ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[Any] , *__a : List[Any] , **__a : List[str] ) -> Dict: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Tuple ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : Dict , **__a : str ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) def a_ ( *_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : Optional[Any] ) -> str: requires_backends(_UpperCAmelCase ,['torch'] ) def a_ ( *_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : Tuple ) -> List[Any]: requires_backends(_UpperCAmelCase ,['torch'] ) def a_ ( *_UpperCAmelCase : Any ,**_UpperCAmelCase : List[Any] ) -> Optional[Any]: requires_backends(_UpperCAmelCase ,['torch'] ) def a_ ( *_UpperCAmelCase : Tuple ,**_UpperCAmelCase : str ) -> List[str]: requires_backends(_UpperCAmelCase ,['torch'] ) def a_ ( *_UpperCAmelCase : List[str] ,**_UpperCAmelCase : Union[str, Any] ) -> Dict: requires_backends(_UpperCAmelCase ,['torch'] ) def a_ ( *_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : List[str] ) -> Dict: requires_backends(_UpperCAmelCase ,['torch'] ) def a_ ( *_UpperCAmelCase : List[Any] ,**_UpperCAmelCase : str ) -> Optional[int]: requires_backends(_UpperCAmelCase ,['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Dict , *__a : Tuple , **__a : Any ) -> str: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : str , *__a : Any , **__a : str ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : int , *__a : int , **__a : Union[str, Any] ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : str , *__a : Optional[int] , **__a : Union[str, Any] ) -> int: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[Any] , *__a : List[str] , **__a : Union[str, Any] ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Any ) -> int: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Any , *__a : str , **__a : Tuple ) -> List[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : int , *__a : Optional[int] , **__a : Tuple ) -> List[str]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : int , *__a : Union[str, Any] , **__a : List[str] ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : List[str] , **__a : List[Any] ) -> int: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[Any] , *__a : Any , **__a : Optional[Any] ) -> Any: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : int , *__a : Optional[int] , **__a : int ) -> int: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Any , *__a : List[Any] , **__a : int ) -> List[str]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : Tuple , **__a : int ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : Optional[int] , **__a : List[Any] ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : Optional[int] , **__a : Any ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Dict , *__a : str , **__a : Tuple ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Optional[int] , *__a : Dict , **__a : Any ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Optional[int] , *__a : Tuple , **__a : Dict ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[Any] , *__a : List[str] , **__a : Union[str, Any] ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : str , *__a : int , **__a : Any ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Any , *__a : Tuple , **__a : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[Any] , *__a : Union[str, Any] , **__a : List[Any] ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : str , *__a : Union[str, Any] , **__a : Dict ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[Any] , *__a : Dict , **__a : int ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : str , **__a : int ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Dict , *__a : str , **__a : List[Any] ) -> Any: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : Optional[Any] , **__a : Union[str, Any] ) -> int: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : Optional[int] , **__a : Union[str, Any] ) -> int: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[Any] , *__a : int , **__a : Any ) -> List[str]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : List[Any] , **__a : str ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : Any , **__a : List[str] ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : int , *__a : Union[str, Any] , **__a : Optional[int] ) -> Tuple: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[Any] , *__a : Tuple , **__a : Any ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : Tuple , **__a : Optional[Any] ) -> List[str]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : List[Any] , **__a : Tuple ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : int , *__a : List[Any] , **__a : Optional[int] ) -> Any: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Dict , *__a : Any , **__a : Optional[int] ) -> List[str]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[int] , *__a : Any , **__a : Any ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Any , *__a : List[Any] , **__a : Union[str, Any] ) -> int: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Tuple , *__a : Dict , **__a : Optional[Any] ) -> Tuple: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Any , *__a : int , **__a : Union[str, Any] ) -> int: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Any , *__a : List[Any] , **__a : int ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Any , *__a : Optional[Any] , **__a : str ) -> List[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : str , *__a : Any , **__a : int ) -> Any: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : int , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Optional[int] , *__a : Optional[int] , **__a : int ) -> str: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Any , *__a : List[Any] , **__a : str ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : Union[str, Any] , **__a : List[Any] ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[str] , *__a : Dict , **__a : List[str] ) -> Any: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : str , **__a : Any ) -> int: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Union[str, Any] , *__a : int , **__a : List[str] ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[Any] , *__a : str , **__a : int ) -> List[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : List[Any] , **__a : Dict ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : List[Any] , **__a : Optional[int] ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : int , *__a : Optional[int] , **__a : List[Any] ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : str , **__a : str ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : Optional[Any] , **__a : Optional[Any] ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[str] , *__a : List[str] , **__a : List[str] ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : List[Any] , **__a : List[str] ) -> Any: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : List[str] , **__a : List[str] ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Any , *__a : Tuple , **__a : List[str] ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : Union[str, Any] , **__a : Tuple ) -> int: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : Union[str, Any] , **__a : Optional[Any] ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Any , *__a : List[str] , **__a : List[str] ) -> Optional[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[int] , *__a : List[Any] , **__a : int ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Dict , *__a : List[Any] , **__a : List[str] ) -> Any: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : str , *__a : Tuple , **__a : Optional[Any] ) -> Any: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : Tuple , **__a : Tuple ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Union[str, Any] , *__a : Optional[int] , **__a : Optional[int] ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Any , *__a : int , **__a : Any ) -> Any: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Union[str, Any] , *__a : Any , **__a : Optional[Any] ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : Dict , **__a : Optional[Any] ) -> List[str]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Any , *__a : List[str] , **__a : List[Any] ) -> List[str]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Union[str, Any] , *__a : List[Any] , **__a : Union[str, Any] ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : List[str] , **__a : str ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Optional[int] , *__a : int , **__a : Any ) -> Optional[int]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[Any] , *__a : str , **__a : Optional[Any] ) -> Any: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : str , *__a : List[Any] , **__a : int ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : List[str] , **__a : Tuple ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Any , *__a : Tuple , **__a : Union[str, Any] ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : List[Any] , **__a : str ) -> List[str]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Optional[Any] , *__a : List[str] , **__a : Optional[int] ) -> Dict: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[Any] , *__a : int , **__a : Optional[int] ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Dict , *__a : int , **__a : str ) -> List[str]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Tuple , *__a : Tuple , **__a : str ) -> List[str]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : str , *__a : List[str] , **__a : str ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : Union[str, Any] , **__a : str ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Tuple , *__a : Tuple , **__a : List[Any] ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : Optional[Any] , **__a : str ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Optional[int] , *__a : Optional[Any] , **__a : Any ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Optional[Any] , *__a : Any , **__a : List[str] ) -> Union[str, Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[str] , *__a : Dict , **__a : List[Any] ) -> Any: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : str , **__a : Optional[Any] ) -> str: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Optional[int] , *__a : Optional[Any] , **__a : Union[str, Any] ) -> List[str]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : int , *__a : Dict , **__a : Dict ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : Optional[Any] , **__a : Tuple ) -> Dict: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[str] , *__a : Union[str, Any] , **__a : Optional[int] ) -> List[str]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : str , *__a : Any , **__a : Optional[int] ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Optional[int] , *__a : Any , **__a : int ) -> List[str]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : Union[str, Any] , *__a : Tuple , **__a : List[str] ) -> List[str]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Any , *__a : Tuple , **__a : List[str] ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : str , **__a : List[str] ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : str , *__a : Dict , **__a : List[Any] ) -> Any: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Union[str, Any] , *__a : Optional[int] , **__a : List[Any] ) -> int: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : Any , **__a : List[Any] ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[Any] , *__a : Optional[Any] , **__a : Any ) -> List[Any]: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : List[Any] , **__a : List[str] ) -> Optional[int]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : List[str] , **__a : Any ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[str] , *__a : Any , **__a : int ) -> Any: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Optional[Any] , *__a : Dict , **__a : Any ) -> Any: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : List[Any] , *__a : Union[str, Any] , **__a : Union[str, Any] ) -> List[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) class snake_case__ ( metaclass=SCREAMING_SNAKE_CASE_ ): A__ = ['''torch'''] def __init__( self : List[str] , *__a : Tuple , **__a : Tuple ) -> Dict: '''simple docstring''' requires_backends(self , ['torch'] ) @classmethod def A_ ( cls : Tuple , *__a : Tuple , **__a : Tuple ) -> Optional[Any]: '''simple docstring''' requires_backends(cls , ['torch'] ) @classmethod def A_ ( cls : int , *__a : Union[str, Any] , **__a : str ) -> Tuple: '''simple docstring''' requires_backends(cls , ['torch'] )
0
'''simple docstring''' from math import factorial A__ : dict[str, int] = {str(digit): factorial(digit) for digit in range(1_0)} def a_ ( _UpperCAmelCase : int ) -> int: if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): raise TypeError('Parameter number must be int' ) if number < 0: raise ValueError('Parameter number must be greater than or equal to 0' ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(_UpperCAmelCase ) ) def a_ ( _UpperCAmelCase : int = 60 ,_UpperCAmelCase : int = 1_00_00_00 ) -> int: if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ) or not isinstance(_UpperCAmelCase ,_UpperCAmelCase ): raise TypeError('Parameters chain_length and number_limit must be int' ) if chain_length <= 0 or number_limit <= 0: raise ValueError( 'Parameters chain_length and number_limit must be greater than 0' ) # the counter for the chains with the exact desired length __snake_case : List[str] = 0 # the cached sizes of the previous chains __snake_case : dict[int, int] = {} for start_chain_element in range(1 ,_UpperCAmelCase ): # The temporary set will contain the elements of the chain __snake_case : Optional[int] = set() __snake_case : List[Any] = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. __snake_case : str = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(_UpperCAmelCase ) chain_set_length += 1 __snake_case : Tuple = digit_factorial_sum(_UpperCAmelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] __snake_case : Optional[Any] = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(F"""{solution()}""")
0
1
'''simple docstring''' A__ : Optional[Any] = 2_5_6 # Modulus to hash a string A__ : List[str] = 1_0_0_0_0_0_3 def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ) -> bool: __snake_case : Any = len(_UpperCAmelCase ) __snake_case : List[Any] = len(_UpperCAmelCase ) if p_len > t_len: return False __snake_case : int = 0 __snake_case : Optional[Any] = 0 __snake_case : Optional[int] = 1 # Calculating the hash of pattern and substring of text for i in range(_UpperCAmelCase ): __snake_case : Union[str, Any] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus __snake_case : Optional[int] = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue __snake_case : Optional[Any] = (modulus_power * alphabet_size) % modulus for i in range(0 ,t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash __snake_case : Optional[int] = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def a_ ( ) -> None: __snake_case : Optional[int] = 'abc1abc12' __snake_case : List[Any] = 'alskfjaldsabc1abc1abc12k23adsfabcabc' __snake_case : Any = 'alskfjaldsk23adsfabcabc' assert rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) and not rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) # Test 2) __snake_case : str = 'ABABX' __snake_case : List[str] = 'ABABZABABYABABX' assert rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) # Test 3) __snake_case : Tuple = 'AAAB' __snake_case : Union[str, Any] = 'ABAAAAAB' assert rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) # Test 4) __snake_case : Tuple = 'abcdabcy' __snake_case : str = 'abcxabcdabxabcdabcdabcy' assert rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) # Test 5) __snake_case : Dict = 'Lü' __snake_case : Union[str, Any] = 'Lüsai' assert rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : str = 'Lue' assert not rabin_karp(_UpperCAmelCase ,_UpperCAmelCase ) print('Success.' ) if __name__ == "__main__": test_rabin_karp()
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int = 1_00 ) -> int: __snake_case : Any = n * (n + 1) * (2 * n + 1) / 6 __snake_case : Union[str, Any] = (n * (n + 1) / 2) ** 2 return int(square_of_sum - sum_of_squares ) if __name__ == "__main__": print(F"""{solution() = }""")
0
1
'''simple docstring''' import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def a_ ( _UpperCAmelCase : List[Any] ) -> Optional[Any]: return EnvironmentCommand() def a_ ( _UpperCAmelCase : Union[str, Any] ) -> List[str]: return EnvironmentCommand(args.accelerate_config_file ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @staticmethod def A_ ( __a : ArgumentParser ) -> int: '''simple docstring''' __snake_case : Optional[Any] = parser.add_parser('env' ) download_parser.set_defaults(func=__a ) download_parser.add_argument( '--accelerate-config_file' , default=__a , help='The accelerate config file to use for the default values in the launching script.' , ) download_parser.set_defaults(func=__a ) def __init__( self : Union[str, Any] , __a : Tuple , *__a : Dict ) -> None: '''simple docstring''' __snake_case : Tuple = accelerate_config_file def A_ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = 'not installed' if is_safetensors_available(): import safetensors __snake_case : List[str] = safetensors.__version__ elif importlib.util.find_spec('safetensors' ) is not None: import safetensors __snake_case : Dict = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.''' __snake_case : Optional[Any] = 'not installed' __snake_case : Optional[Any] = 'not found' if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file __snake_case : Any = accelerate.__version__ # Get the default from the config file. if self._accelerate_config_file is not None or os.path.isfile(__a ): __snake_case : List[Any] = load_config_from_file(self._accelerate_config_file ).to_dict() __snake_case : List[Any] = ( '\n'.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] ) if isinstance(__a , __a ) else f'''\t{accelerate_config}''' ) __snake_case : str = 'not installed' __snake_case : Dict = 'NA' if is_torch_available(): import torch __snake_case : Any = torch.__version__ __snake_case : List[str] = torch.cuda.is_available() __snake_case : Tuple = 'not installed' __snake_case : Dict = 'NA' if is_tf_available(): import tensorflow as tf __snake_case : Dict = tf.__version__ try: # deprecated in v2.1 __snake_case : Dict = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool __snake_case : Dict = bool(tf.config.list_physical_devices('GPU' ) ) __snake_case : Union[str, Any] = 'not installed' __snake_case : int = 'not installed' __snake_case : str = 'not installed' __snake_case : Any = 'NA' if is_flax_available(): import flax import jax import jaxlib __snake_case : int = flax.__version__ __snake_case : Union[str, Any] = jax.__version__ __snake_case : List[Any] = jaxlib.__version__ __snake_case : Optional[Any] = jax.lib.xla_bridge.get_backend().platform __snake_case : Optional[int] = { '`transformers` version': version, 'Platform': platform.platform(), 'Python version': platform.python_version(), 'Huggingface_hub version': huggingface_hub.__version__, 'Safetensors version': f'''{safetensors_version}''', 'Accelerate version': f'''{accelerate_version}''', 'Accelerate config': f'''{accelerate_config_str}''', 'PyTorch version (GPU?)': f'''{pt_version} ({pt_cuda_available})''', 'Tensorflow version (GPU?)': f'''{tf_version} ({tf_cuda_available})''', 'Flax version (CPU?/GPU?/TPU?)': f'''{flax_version} ({jax_backend})''', 'Jax version': f'''{jax_version}''', 'JaxLib version': f'''{jaxlib_version}''', 'Using GPU in script?': '<fill in>', 'Using distributed or parallel set-up in script?': '<fill in>', } print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' ) print(self.format_dict(__a ) ) return info @staticmethod def A_ ( __a : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A__ : int = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Tuple = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Optional[int] = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys A__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
1
'''simple docstring''' import os def a_ ( ) -> int: with open(os.path.dirname(_UpperCAmelCase ) + '/p022_names.txt' ) as file: __snake_case : Optional[Any] = str(file.readlines()[0] ) __snake_case : int = names.replace('"' ,'' ).split(',' ) names.sort() __snake_case : Optional[int] = 0 __snake_case : int = 0 for i, name in enumerate(_UpperCAmelCase ): for letter in name: name_score += ord(_UpperCAmelCase ) - 64 total_score += (i + 1) * name_score __snake_case : Dict = 0 return total_score if __name__ == "__main__": print(solution())
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ShapEPipeline A__ = ['''prompt'''] A__ = ['''prompt'''] A__ = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Optional[Any] ) -> str: '''simple docstring''' return 32 @property def A_ ( self : str ) -> Optional[int]: '''simple docstring''' return 32 @property def A_ ( self : Tuple ) -> List[Any]: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : Tuple ) -> Dict: '''simple docstring''' return 8 @property def A_ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' __snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__a ) @property def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Dict = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __snake_case : Optional[Any] = PriorTransformer(**__a ) return model @property def A_ ( self : Dict ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Tuple = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __snake_case : Optional[int] = ShapERenderer(**__a ) return model def A_ ( self : Tuple ) -> Tuple: '''simple docstring''' __snake_case : Tuple = self.dummy_prior __snake_case : Union[str, Any] = self.dummy_text_encoder __snake_case : List[str] = self.dummy_tokenizer __snake_case : Optional[Any] = self.dummy_renderer __snake_case : List[Any] = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=__a , clip_sample=__a , clip_sample_range=1.0 , ) __snake_case : int = { 'prior': prior, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'renderer': renderer, 'scheduler': scheduler, } return components def A_ ( self : Union[str, Any] , __a : Dict , __a : int=0 ) -> Optional[Any]: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : List[str] = torch.manual_seed(__a ) else: __snake_case : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : Optional[int] = { 'prompt': 'horse', 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def A_ ( self : List[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = 'cpu' __snake_case : Dict = self.get_dummy_components() __snake_case : int = self.pipeline_class(**__a ) __snake_case : str = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : Dict = output.images[0] __snake_case : int = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __snake_case : str = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A_ ( self : Any ) -> List[str]: '''simple docstring''' # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def A_ ( self : int ) -> Tuple: '''simple docstring''' __snake_case : int = torch_device == 'cpu' __snake_case : str = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=__a , relax_max_difference=__a , ) def A_ ( self : List[str] ) -> Dict: '''simple docstring''' __snake_case : str = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**__a ) __snake_case : Dict = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : int = 1 __snake_case : Tuple = 2 __snake_case : Tuple = self.get_dummy_inputs(__a ) for key in inputs.keys(): if key in self.batch_params: __snake_case : Union[str, Any] = batch_size * [inputs[key]] __snake_case : str = pipe(**__a , num_images_per_prompt=__a )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class snake_case__ ( unittest.TestCase ): def A_ ( self : str ) -> Dict: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_np_out.npy' ) __snake_case : Union[str, Any] = ShapEPipeline.from_pretrained('openai/shap-e' ) __snake_case : Any = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[int] = torch.Generator(device=__a ).manual_seed(0 ) __snake_case : Union[str, Any] = pipe( 'a shark' , generator=__a , guidance_scale=1_5.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(__a , __a )
0
1
'''simple docstring''' from __future__ import annotations def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ,_UpperCAmelCase : float ) -> dict[str, float]: if (voltage, current, resistance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if resistance < 0: raise ValueError('Resistance cannot be negative' ) if voltage == 0: return {"voltage": float(current * resistance )} elif current == 0: return {"current": voltage / resistance} elif resistance == 0: return {"resistance": voltage / current} else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
0
'''simple docstring''' from __future__ import annotations import time import numpy as np A__ : str = [8, 5, 9, 7] A__ : List[str] = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] A__ : Dict = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class snake_case__ : def __init__( self : Union[str, Any] , __a : list[int] , __a : list[list[int]] , __a : list[list[int]] , ) -> None: '''simple docstring''' __snake_case : int = claim_vector __snake_case : Optional[int] = allocated_resources_table __snake_case : List[str] = maximum_claim_table def A_ ( self : str ) -> list[int]: '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def A_ ( self : int ) -> list[int]: '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def A_ ( self : int ) -> list[list[int]]: '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__a ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def A_ ( self : str ) -> dict[int, list[int]]: '''simple docstring''' return {self.__need().index(__a ): i for i in self.__need()} def A_ ( self : Union[str, Any] , **__a : int ) -> None: '''simple docstring''' __snake_case : str = self.__need() __snake_case : List[Any] = self.__allocated_resources_table __snake_case : Optional[int] = self.__available_resources() __snake_case : Union[str, Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: __snake_case : Tuple = False for each_need in need_list: __snake_case : Any = True for index, need in enumerate(__a ): if need > available_resources[index]: __snake_case : List[str] = False break if execution: __snake_case : Union[str, Any] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: __snake_case : str = original_need_index print(f'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(__a ) # update available/freed resources stack __snake_case : Union[str, Any] = np.array(__a ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(__a ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( f'''P{self.__allocated_resources_table.index(__a ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( f'''P{self.__maximum_claim_table.index(__a ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(__a ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(__a ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
0
1
'''simple docstring''' import random def a_ ( _UpperCAmelCase : int ) -> bool: __snake_case : Tuple = num - 1 __snake_case : str = 0 while s % 2 == 0: __snake_case : Tuple = s // 2 t += 1 for _ in range(5 ): __snake_case : List[str] = random.randrange(2 ,num - 1 ) __snake_case : Union[str, Any] = pow(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) if v != 1: __snake_case : Union[str, Any] = 0 while v != (num - 1): if i == t - 1: return False else: __snake_case : Dict = i + 1 __snake_case : List[Any] = (v**2) % num return True def a_ ( _UpperCAmelCase : int ) -> bool: if num < 2: return False __snake_case : Optional[int] = [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 1_01, 1_03, 1_07, 1_09, 1_13, 1_27, 1_31, 1_37, 1_39, 1_49, 1_51, 1_57, 1_63, 1_67, 1_73, 1_79, 1_81, 1_91, 1_93, 1_97, 1_99, 2_11, 2_23, 2_27, 2_29, 2_33, 2_39, 2_41, 2_51, 2_57, 2_63, 2_69, 2_71, 2_77, 2_81, 2_83, 2_93, 3_07, 3_11, 3_13, 3_17, 3_31, 3_37, 3_47, 3_49, 3_53, 3_59, 3_67, 3_73, 3_79, 3_83, 3_89, 3_97, 4_01, 4_09, 4_19, 4_21, 4_31, 4_33, 4_39, 4_43, 4_49, 4_57, 4_61, 4_63, 4_67, 4_79, 4_87, 4_91, 4_99, 5_03, 5_09, 5_21, 5_23, 5_41, 5_47, 5_57, 5_63, 5_69, 5_71, 5_77, 5_87, 5_93, 5_99, 6_01, 6_07, 6_13, 6_17, 6_19, 6_31, 6_41, 6_43, 6_47, 6_53, 6_59, 6_61, 6_73, 6_77, 6_83, 6_91, 7_01, 7_09, 7_19, 7_27, 7_33, 7_39, 7_43, 7_51, 7_57, 7_61, 7_69, 7_73, 7_87, 7_97, 8_09, 8_11, 8_21, 8_23, 8_27, 8_29, 8_39, 8_53, 8_57, 8_59, 8_63, 8_77, 8_81, 8_83, 8_87, 9_07, 9_11, 9_19, 9_29, 9_37, 9_41, 9_47, 9_53, 9_67, 9_71, 9_77, 9_83, 9_91, 9_97, ] if num in low_primes: return True for prime in low_primes: if (num % prime) == 0: return False return rabin_miller(_UpperCAmelCase ) def a_ ( _UpperCAmelCase : int = 10_24 ) -> int: while True: __snake_case : Optional[int] = random.randrange(2 ** (keysize - 1) ,2 ** (keysize) ) if is_prime_low_num(_UpperCAmelCase ): return num if __name__ == "__main__": A__ : Optional[int] = generate_large_prime() print(('''Prime number:''', num)) print(('''is_prime_low_num:''', is_prime_low_num(num)))
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer A__ : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} A__ : List[Any] = { '''vocab_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt''' ), '''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''', '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''google/electra-small-generator''': ( '''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json''' ), '''google/electra-base-generator''': ( '''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json''' ), '''google/electra-large-generator''': ( '''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json''' ), '''google/electra-small-discriminator''': ( '''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-base-discriminator''': ( '''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json''' ), '''google/electra-large-discriminator''': ( '''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json''' ), }, } A__ : List[Any] = { '''google/electra-small-generator''': 5_1_2, '''google/electra-base-generator''': 5_1_2, '''google/electra-large-generator''': 5_1_2, '''google/electra-small-discriminator''': 5_1_2, '''google/electra-base-discriminator''': 5_1_2, '''google/electra-large-discriminator''': 5_1_2, } A__ : Optional[Any] = { '''google/electra-small-generator''': {'''do_lower_case''': True}, '''google/electra-base-generator''': {'''do_lower_case''': True}, '''google/electra-large-generator''': {'''do_lower_case''': True}, '''google/electra-small-discriminator''': {'''do_lower_case''': True}, '''google/electra-base-discriminator''': {'''do_lower_case''': True}, '''google/electra-large-discriminator''': {'''do_lower_case''': True}, } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_INIT_CONFIGURATION A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ElectraTokenizer def __init__( self : int , __a : List[Any]=None , __a : int=None , __a : List[str]=True , __a : Any="[UNK]" , __a : Any="[SEP]" , __a : Union[str, Any]="[PAD]" , __a : Dict="[CLS]" , __a : List[Any]="[MASK]" , __a : str=True , __a : Optional[int]=None , **__a : Optional[int] , ) -> str: '''simple docstring''' super().__init__( __a , tokenizer_file=__a , do_lower_case=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , tokenize_chinese_chars=__a , strip_accents=__a , **__a , ) __snake_case : Tuple = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , __a ) != do_lower_case or normalizer_state.get('strip_accents' , __a ) != strip_accents or normalizer_state.get('handle_chinese_chars' , __a ) != tokenize_chinese_chars ): __snake_case : List[Any] = getattr(__a , normalizer_state.pop('type' ) ) __snake_case : str = do_lower_case __snake_case : Optional[int] = strip_accents __snake_case : Any = tokenize_chinese_chars __snake_case : Union[str, Any] = normalizer_class(**__a ) __snake_case : Any = do_lower_case def A_ ( self : Any , __a : List[str] , __a : Optional[Any]=None ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A_ ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]: '''simple docstring''' __snake_case : int = [self.sep_token_id] __snake_case : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def A_ ( self : Optional[int] , __a : str , __a : Optional[str] = None ) -> Tuple[str]: '''simple docstring''' __snake_case : Tuple = self._tokenizer.model.save(__a , name=__a ) return tuple(__a )
0
1
'''simple docstring''' import argparse from collections import defaultdict def a_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Union[str, Any] ) -> Dict: __snake_case : Optional[Any] = f'''{file}_{class_name}_{test_name}''' done_test[_id] += 1 with open(_UpperCAmelCase ,'r' ) as f: __snake_case : Dict = f.readlines() __snake_case : Optional[Any] = f'''class {class_name}(''' __snake_case : Any = f'''{4 * " "}def {test_name}(''' __snake_case : Tuple = f'''{8 * " "}{correct_line.split()[0]}''' __snake_case : Tuple = f'''{16 * " "}{correct_line.split()[0]}''' __snake_case : str = False __snake_case : Union[str, Any] = False __snake_case : int = False __snake_case : Any = False __snake_case : Union[str, Any] = 0 __snake_case : Optional[int] = 0 __snake_case : Dict = [] for line in lines: if line.startswith(_UpperCAmelCase ): __snake_case : List[Any] = True elif in_class and line.startswith(_UpperCAmelCase ): __snake_case : Union[str, Any] = True elif in_class and in_func and (line.startswith(_UpperCAmelCase ) or line.startswith(_UpperCAmelCase )): __snake_case : List[str] = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: __snake_case : int = True if in_class and in_func and in_line: if ")" not in line: continue else: __snake_case : Optional[int] = True if in_class and in_func and in_line and insert_line: new_lines.append(f'''{spaces * " "}{correct_line}''' ) __snake_case : Optional[Any] = False else: new_lines.append(_UpperCAmelCase ) with open(_UpperCAmelCase ,'w' ) as f: for line in new_lines: f.write(_UpperCAmelCase ) def a_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : Any=None ) -> List[str]: if fail is not None: with open(_UpperCAmelCase ,'r' ) as f: __snake_case : Dict = {l.strip() for l in f.readlines()} else: __snake_case : int = None with open(_UpperCAmelCase ,'r' ) as f: __snake_case : Any = f.readlines() __snake_case : str = defaultdict(_UpperCAmelCase ) for line in correct_lines: __snake_case , __snake_case , __snake_case , __snake_case : List[str] = line.split(';' ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) if __name__ == "__main__": A__ : List[Any] = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) A__ : Dict = parser.parse_args() main(args.correct_filename, args.fail_filename)
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> bool: __snake_case : Union[str, Any] = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(2_7)) print(perfect_cube(4))
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> str: return "\n".join( f'''{number} * {i} = {number * i}''' for i in range(1 ,number_of_terms + 1 ) ) if __name__ == "__main__": print(multiplication_table(number=5, number_of_terms=1_0))
0
'''simple docstring''' import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss A__ : Tuple = pytest.mark.integration @require_faiss class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Dict = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(__a ) for x in np.arange(30 ).tolist()]} ) return dset def A_ ( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() __snake_case : Dict = dset.map( lambda __a , __a : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__a , keep_in_memory=__a ) __snake_case : List[Any] = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) __snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) dset.drop_index('vecs' ) def A_ ( self : Tuple ) -> Any: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) __snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def A_ ( self : List[Any] ) -> Dict: '''simple docstring''' import faiss __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file: dset.save_faiss_index('vecs' , tmp_file.name ) dset.load_faiss_index('vecs2' , tmp_file.name ) os.unlink(tmp_file.name ) __snake_case , __snake_case : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) def A_ ( self : Union[str, Any] ) -> Dict: '''simple docstring''' __snake_case : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' ) dset.drop_index('vecs' ) self.assertRaises(__a , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) ) def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' from elasticsearch import Elasticsearch __snake_case : Dataset = self._create_dummy_dataset() with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __snake_case : Any = {'acknowledged': True} mocked_bulk.return_value([(True, None)] * 30 ) __snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 29}]}} __snake_case : Union[str, Any] = Elasticsearch() dset.add_elasticsearch_index('filename' , es_client=__a ) __snake_case , __snake_case : str = dset.get_nearest_examples('filename' , 'my_name-train_29' ) self.assertEqual(examples['filename'][0] , 'my_name-train_29' ) @require_faiss class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : str ) -> int: '''simple docstring''' import faiss __snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query __snake_case : Dict = np.zeros(5 , dtype=np.floataa ) __snake_case : List[str] = 1 __snake_case , __snake_case : List[Any] = index.search(__a ) self.assertRaises(__a , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries __snake_case : List[str] = np.eye(5 , dtype=np.floataa )[::-1] __snake_case , __snake_case : Dict = index.search_batch(__a ) self.assertRaises(__a , index.search_batch , queries[0] ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : List[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , __a ) def A_ ( self : int ) -> int: '''simple docstring''' import faiss __snake_case : int = FaissIndex(string_factory='Flat' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) __snake_case : List[str] = FaissIndex(string_factory='LSH' ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(__a ): __snake_case : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) ) def A_ ( self : str ) -> Dict: '''simple docstring''' import faiss __snake_case : Tuple = faiss.IndexFlat(5 ) __snake_case : List[Any] = FaissIndex(custom_index=__a ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' import faiss __snake_case : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file: index.save(tmp_file.name ) __snake_case : List[Any] = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) __snake_case : List[Any] = np.zeros(5 , dtype=np.floataa ) __snake_case : Any = 1 __snake_case , __snake_case : int = index.search(__a ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def a_ ( _UpperCAmelCase : str ) -> Optional[int]: import faiss __snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 ,dtype=np.floataa ) ) __snake_case : Dict = 'index.faiss' __snake_case : Any = f'''mock://{index_name}''' index.save(_UpperCAmelCase ,storage_options=mockfs.storage_options ) __snake_case : Any = FaissIndex.load(_UpperCAmelCase ,storage_options=mockfs.storage_options ) __snake_case : Any = np.zeros(5 ,dtype=np.floataa ) __snake_case : Any = 1 __snake_case , __snake_case : Tuple = index.search(_UpperCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' from elasticsearch import Elasticsearch with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch( 'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk: __snake_case : int = Elasticsearch() __snake_case : Dict = {'acknowledged': True} __snake_case : List[Any] = ElasticSearchIndex(es_client=__a ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(['foo', 'bar', 'foobar'] ) # single query __snake_case : Optional[Any] = 'foo' __snake_case : int = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case : List[Any] = index.search(__a ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout __snake_case : Dict = 'foo' __snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}} __snake_case , __snake_case : Optional[Any] = index.search(__a , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries __snake_case : List[Any] = ['foo', 'bar', 'foobar'] __snake_case : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case : Any = index.search_batch(__a ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([1, 1, 1] , __a ) # batched queries with timeout __snake_case : Tuple = ['foo', 'bar', 'foobar'] __snake_case : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}} __snake_case , __snake_case : int = index.search_batch(__a , request_timeout=30 ) __snake_case : Any = [scores[0] for scores in total_scores] __snake_case : Dict = [indices[0] for indices in total_indices] self.assertGreater(np.min(__a ) , 0 ) self.assertListEqual([1, 1, 1] , __a )
0
1
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging A__ : str = logging.get_logger(__name__) A__ : Optional[int] = { '''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''', # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''gpt_neo''' A__ = ['''past_key_values'''] A__ = {'''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : Union[str, Any] , __a : Tuple=50257 , __a : str=2048 , __a : Optional[Any]=2048 , __a : Union[str, Any]=24 , __a : List[str]=[[["global", "local"], 12]] , __a : Optional[int]=16 , __a : List[str]=None , __a : List[Any]=256 , __a : Union[str, Any]="gelu_new" , __a : Optional[int]=0.0 , __a : int=0.0 , __a : Dict=0.0 , __a : List[Any]=0.1 , __a : Tuple=1e-5 , __a : Union[str, Any]=0.0_2 , __a : Union[str, Any]=True , __a : List[str]=50256 , __a : Dict=50256 , **__a : int , ) -> Optional[Any]: '''simple docstring''' __snake_case : int = vocab_size __snake_case : List[str] = max_position_embeddings __snake_case : Optional[Any] = hidden_size __snake_case : List[str] = num_layers __snake_case : List[Any] = num_heads __snake_case : Optional[Any] = intermediate_size __snake_case : Optional[Any] = window_size __snake_case : int = activation_function __snake_case : Optional[int] = resid_dropout __snake_case : List[Any] = embed_dropout __snake_case : int = attention_dropout __snake_case : Any = classifier_dropout __snake_case : int = layer_norm_epsilon __snake_case : List[Any] = initializer_range __snake_case : List[Any] = use_cache __snake_case : Dict = bos_token_id __snake_case : List[Any] = eos_token_id __snake_case : Union[str, Any] = attention_types __snake_case : Union[str, Any] = self.expand_attention_types_params(__a ) if len(self.attention_layers ) != self.num_layers: raise ValueError( 'Configuration for convolutional module is incorrect. ' 'It is required that `len(config.attention_layers)` == `config.num_layers` ' f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' '`config.attention_layers` is prepared using `config.attention_types`. ' 'Please verify the value of `config.attention_types` argument.' ) super().__init__(bos_token_id=__a , eos_token_id=__a , **__a ) @staticmethod def A_ ( __a : Tuple ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def a_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Union[str, Any] ) -> List[Any]: import torch __snake_case : Tuple = input.size() __snake_case : Any = len(_UpperCAmelCase ) __snake_case : Any = shape[dimension] __snake_case : Any = torch.arange(0 ,_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : int = torch.div(sizedim - size ,_UpperCAmelCase ,rounding_mode='floor' ) + 1 __snake_case : List[Any] = torch.arange(_UpperCAmelCase ) + low_indices[:min_length][:, None] __snake_case : int = [slice(_UpperCAmelCase )] * rank __snake_case : Optional[Any] = indices __snake_case : Union[str, Any] = input[s] __snake_case : List[Any] = list(range(0 ,rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(_UpperCAmelCase ) def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ) -> Any: import torch __snake_case : Union[str, Any] = torch.arange(1 ,_UpperCAmelCase ) __snake_case : Dict = torch.remainder(_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : Optional[int] = remainders == 0 __snake_case : Tuple = candidates[divisor_indices] __snake_case : str = torch.max(_UpperCAmelCase ) return largest_divisor, torch.div(_UpperCAmelCase ,_UpperCAmelCase ,rounding_mode='floor' ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @property def A_ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __snake_case : Optional[int] = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} ) if self.use_past: self.fill_with_past_key_values_(__a , direction='inputs' ) __snake_case : Dict = {0: 'batch', 1: 'past_sequence + sequence'} else: __snake_case : Tuple = {0: 'batch', 1: 'sequence'} return common_inputs @property def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' return self._config.num_heads def A_ ( self : Dict , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ) -> Mapping[str, Any]: '''simple docstring''' __snake_case : Tuple = super(__a , self ).generate_dummy_inputs( __a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a ) # We need to order the input in the way they appears in the forward() __snake_case : Union[str, Any] = OrderedDict({'input_ids': common_inputs['input_ids']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' ) else: import torch __snake_case , __snake_case : int = common_inputs['input_ids'].shape # Not using the same length for past_key_values __snake_case : List[Any] = seqlen + 2 __snake_case : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __snake_case : Optional[Any] = [ (torch.zeros(__a ), torch.zeros(__a )) for _ in range(self.num_layers ) ] __snake_case : Dict = common_inputs['attention_mask'] if self.use_past: __snake_case : Dict = ordered_inputs['attention_mask'].dtype __snake_case : Dict = torch.cat( [ordered_inputs['attention_mask'], torch.ones(__a , __a , dtype=__a )] , dim=1 ) return ordered_inputs @property def A_ ( self : int ) -> int: '''simple docstring''' return 13
0
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging A__ : List[Any] = logging.get_logger(__name__) A__ : Tuple = { '''t5-small''': '''https://huggingface.co/t5-small/resolve/main/config.json''', '''t5-base''': '''https://huggingface.co/t5-base/resolve/main/config.json''', '''t5-large''': '''https://huggingface.co/t5-large/resolve/main/config.json''', '''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/config.json''', '''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/config.json''', } class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''t5''' A__ = ['''past_key_values'''] A__ = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self : str , __a : Dict=32128 , __a : Dict=512 , __a : Union[str, Any]=64 , __a : str=2048 , __a : Union[str, Any]=6 , __a : Any=None , __a : Any=8 , __a : List[Any]=32 , __a : Any=128 , __a : Tuple=0.1 , __a : str=1e-6 , __a : Dict=1.0 , __a : Tuple="relu" , __a : Dict=True , __a : Union[str, Any]=True , __a : Any=0 , __a : Dict=1 , **__a : Union[str, Any] , ) -> Union[str, Any]: '''simple docstring''' __snake_case : int = vocab_size __snake_case : str = d_model __snake_case : str = d_kv __snake_case : List[Any] = d_ff __snake_case : List[str] = num_layers __snake_case : Tuple = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __snake_case : Union[str, Any] = num_heads __snake_case : Tuple = relative_attention_num_buckets __snake_case : Optional[int] = relative_attention_max_distance __snake_case : Optional[Any] = dropout_rate __snake_case : str = layer_norm_epsilon __snake_case : List[str] = initializer_factor __snake_case : int = feed_forward_proj __snake_case : Optional[Any] = use_cache __snake_case : Optional[Any] = self.feed_forward_proj.split('-' ) __snake_case : Dict = act_info[-1] __snake_case : List[str] = act_info[0] == 'gated' if len(__a ) > 1 and act_info[0] != "gated" or len(__a ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ' '\'gated-gelu\' or \'relu\'' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __snake_case : Dict = 'gelu_new' super().__init__( pad_token_id=__a , eos_token_id=__a , is_encoder_decoder=__a , **__a , ) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @property def A_ ( self : str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __snake_case : Union[str, Any] = { 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: __snake_case : Tuple = 'past_encoder_sequence + sequence' __snake_case : Dict = {0: 'batch'} __snake_case : Dict = {0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __snake_case : Tuple = {0: 'batch', 1: 'decoder_sequence'} __snake_case : int = {0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(__a , direction='inputs' ) return common_inputs @property def A_ ( self : List[Any] ) -> int: '''simple docstring''' return 13
0
1
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = KandinskyVaaPriorPipeline A__ = ['''prompt'''] A__ = ['''prompt''', '''negative_prompt'''] A__ = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Dict ) -> List[str]: '''simple docstring''' return 32 @property def A_ ( self : Any ) -> str: '''simple docstring''' return 32 @property def A_ ( self : str ) -> Optional[int]: '''simple docstring''' return self.time_input_dim @property def A_ ( self : str ) -> int: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return 100 @property def A_ ( self : Tuple ) -> List[str]: '''simple docstring''' __snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__a ) @property def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Any = { 'num_attention_heads': 2, 'attention_head_dim': 12, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } __snake_case : List[Any] = PriorTransformer(**__a ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __snake_case : Any = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __snake_case : Optional[Any] = CLIPVisionModelWithProjection(__a ) return model @property def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : Dict = CLIPImageProcessor( crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = self.dummy_prior __snake_case : List[str] = self.dummy_image_encoder __snake_case : str = self.dummy_text_encoder __snake_case : List[str] = self.dummy_tokenizer __snake_case : List[str] = self.dummy_image_processor __snake_case : Any = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=1_0.0 , ) __snake_case : str = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def A_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple=0 ) -> Any: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : List[str] = torch.manual_seed(__a ) else: __snake_case : List[str] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : List[Any] = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def A_ ( self : str ) -> Dict: '''simple docstring''' __snake_case : str = 'cpu' __snake_case : List[str] = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**__a ) __snake_case : Optional[Any] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : List[str] = output.image_embeds __snake_case : str = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] __snake_case : Union[str, Any] = image[0, -10:] __snake_case : Any = image_from_tuple[0, -10:] assert image.shape == (1, 32) __snake_case : List[Any] = np.array( [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def A_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = torch_device == 'cpu' __snake_case : Dict = True __snake_case : Union[str, Any] = False self._test_inference_batch_single_identical( test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , ) @skip_mps def A_ ( self : str ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = torch_device == 'cpu' __snake_case : Optional[Any] = False self._test_attention_slicing_forward_pass( test_max_difference=__a , test_mean_pixel_difference=__a , )
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging A__ : Tuple = logging.get_logger(__name__) A__ : Optional[int] = {} class snake_case__ ( SCREAMING_SNAKE_CASE_ ): A__ = '''llama''' A__ = ['''past_key_values'''] def __init__( self : Any , __a : List[str]=32000 , __a : Union[str, Any]=4096 , __a : Optional[Any]=11008 , __a : Any=32 , __a : str=32 , __a : Optional[int]=None , __a : Dict="silu" , __a : Dict=2048 , __a : List[str]=0.0_2 , __a : Union[str, Any]=1e-6 , __a : Dict=True , __a : List[str]=0 , __a : Tuple=1 , __a : Tuple=2 , __a : Optional[Any]=1 , __a : Any=False , __a : Tuple=None , **__a : List[Any] , ) -> Optional[int]: '''simple docstring''' __snake_case : str = vocab_size __snake_case : List[str] = max_position_embeddings __snake_case : List[Any] = hidden_size __snake_case : Union[str, Any] = intermediate_size __snake_case : Optional[int] = num_hidden_layers __snake_case : List[Any] = num_attention_heads # for backward compatibility if num_key_value_heads is None: __snake_case : Optional[int] = num_attention_heads __snake_case : Optional[Any] = num_key_value_heads __snake_case : int = hidden_act __snake_case : Any = initializer_range __snake_case : Any = rms_norm_eps __snake_case : Union[str, Any] = pretraining_tp __snake_case : Optional[int] = use_cache __snake_case : Any = rope_scaling self._rope_scaling_validation() super().__init__( pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , tie_word_embeddings=__a , **__a , ) def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if self.rope_scaling is None: return if not isinstance(self.rope_scaling , __a ) or len(self.rope_scaling ) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' f'''got {self.rope_scaling}''' ) __snake_case : Optional[Any] = self.rope_scaling.get('type' , __a ) __snake_case : Tuple = self.rope_scaling.get('factor' , __a ) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' ) if rope_scaling_factor is None or not isinstance(__a , __a ) or rope_scaling_factor <= 1.0: raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
0
1
'''simple docstring''' from __future__ import annotations from cmath import sqrt def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> tuple[complex, complex]: if a == 0: raise ValueError('Coefficient \'a\' must not be zero.' ) __snake_case : int = b * b - 4 * a * c __snake_case : Optional[int] = (-b + sqrt(_UpperCAmelCase )) / (2 * a) __snake_case : List[Any] = (-b - sqrt(_UpperCAmelCase )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def a_ ( ) -> Optional[int]: __snake_case , __snake_case : Union[str, Any] = quadratic_roots(a=5 ,b=6 ,c=1 ) print(f'''The solutions are: {solutiona} and {solutiona}''' ) if __name__ == "__main__": main()
0
'''simple docstring''' from __future__ import annotations A__ : str = '''Muhammad Umer Farooq''' A__ : int = '''MIT''' A__ : Optional[int] = '''1.0.0''' A__ : List[Any] = '''Muhammad Umer Farooq''' A__ : Optional[Any] = '''[email protected]''' A__ : Optional[Any] = '''Alpha''' import re from html.parser import HTMLParser from urllib import parse import requests class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Union[str, Any] , __a : str ) -> None: '''simple docstring''' super().__init__() __snake_case : list[str] = [] __snake_case : Dict = domain def A_ ( self : Dict , __a : str , __a : list[tuple[str, str | None]] ) -> None: '''simple docstring''' # Only parse the 'anchor' tag. if tag == "a": # Check the list of defined attributes. for name, value in attrs: # If href is defined, and not empty nor # print it. if name == "href" and value != "#" and value != "": # If not already in urls. if value not in self.urls: __snake_case : Optional[Any] = parse.urljoin(self.domain , __a ) self.urls.append(__a ) def a_ ( _UpperCAmelCase : str ) -> str: return ".".join(get_sub_domain_name(_UpperCAmelCase ).split('.' )[-2:] ) def a_ ( _UpperCAmelCase : str ) -> str: return parse.urlparse(_UpperCAmelCase ).netloc def a_ ( _UpperCAmelCase : str = "https://github.com" ) -> list[str]: __snake_case : List[Any] = get_domain_name(_UpperCAmelCase ) # Initialize the parser __snake_case : Tuple = Parser(_UpperCAmelCase ) try: # Open URL __snake_case : Any = requests.get(_UpperCAmelCase ) # pass the raw HTML to the parser to get links parser.feed(r.text ) # Get links and loop through __snake_case : Dict = set() for link in parser.urls: # open URL. # read = requests.get(link) try: __snake_case : List[Any] = requests.get(_UpperCAmelCase ) # Get the valid email. __snake_case : Optional[Any] = re.findall('[a-zA-Z0-9]+@' + domain ,read.text ) # If not in list then append it. for email in emails: valid_emails.add(_UpperCAmelCase ) except ValueError: pass except ValueError: raise SystemExit(1 ) # Finally return a sorted list of email addresses with no duplicates. return sorted(_UpperCAmelCase ) if __name__ == "__main__": A__ : Tuple = emails_from_url('''https://github.com''') print(F"""{len(emails)} emails found:""") print('''\n'''.join(sorted(emails)))
0
1
'''simple docstring''' import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() A__ : Union[str, Any] = logging.get_logger(__name__) def a_ ( _UpperCAmelCase : List[str] ) -> Dict: __snake_case : str = torch.load(_UpperCAmelCase ,map_location='cpu' ) if "model" in sd.keys(): __snake_case : Any = torch.load(_UpperCAmelCase ,map_location='cpu' )['model'] # pop unnecessary weights __snake_case : str = [ 'decoder.version', 'decoder.output_projection.weight', ] for key in keys_to_delete: if key in sd: sd.pop(_UpperCAmelCase ) __snake_case : Dict = { 'decoder.project_in_dim.weight': 'decoder.project_in.weight', 'decoder.project_out_dim.weight': 'decoder.project_out.weight', 'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight', 'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias', } for old_key, new_key in keys_to_rename.items(): if old_key in sd: __snake_case : int = sd.pop(_UpperCAmelCase ) __snake_case : Optional[Any] = list(sd.keys() ) for key in keys: if ".qkv_proj." in key: __snake_case : List[Any] = sd[key] # We split QKV in separate Q,K,V __snake_case : Tuple = key.replace('.qkv_proj.' ,'.q_proj.' ) __snake_case : Any = key.replace('.qkv_proj.' ,'.k_proj.' ) __snake_case : Optional[Any] = key.replace('.qkv_proj.' ,'.v_proj.' ) __snake_case : List[Any] = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 __snake_case , __snake_case , __snake_case : Optional[Any] = torch.split(_UpperCAmelCase ,depth // 3 ,dim=0 ) __snake_case : Optional[Any] = q __snake_case : List[str] = k __snake_case : Any = v del sd[key] return sd @torch.no_grad() def a_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict=None ) -> int: __snake_case : Dict = load_checkpoint(_UpperCAmelCase ) if config is not None: __snake_case : Union[str, Any] = OPTConfig.from_pretrained(_UpperCAmelCase ) else: __snake_case : int = OPTConfig() __snake_case : Optional[int] = OPTModel(_UpperCAmelCase ).half().eval() model.load_state_dict(_UpperCAmelCase ) # Check results Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": A__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--fairseq_path''', type=str, help=( '''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:''' ''' https://huggingface.co/models?other=opt_metasq''' ), ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''') A__ : List[str] = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
0
'''simple docstring''' import argparse import json import logging import os import shutil import sys import tempfile import unittest from unittest import mock import torch from accelerate.utils import write_basic_config from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device from transformers.utils import is_apex_available logging.basicConfig(level=logging.DEBUG) A__ : Dict = logging.getLogger() def a_ ( ) -> Tuple: __snake_case : List[Any] = argparse.ArgumentParser() parser.add_argument('-f' ) __snake_case : Any = parser.parse_args() return args.f def a_ ( _UpperCAmelCase : Optional[int] ) -> List[Any]: __snake_case : Tuple = {} __snake_case : Union[str, Any] = os.path.join(_UpperCAmelCase ,'all_results.json' ) if os.path.exists(_UpperCAmelCase ): with open(_UpperCAmelCase ,'r' ) as f: __snake_case : List[str] = json.load(_UpperCAmelCase ) else: raise ValueError(f'''can\'t find {path}''' ) return results def a_ ( ) -> Union[str, Any]: __snake_case : Union[str, Any] = torch.cuda.is_available() and torch_device == 'cuda' return is_using_cuda and is_apex_available() A__ : str = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) class snake_case__ ( SCREAMING_SNAKE_CASE_ ): @classmethod def A_ ( cls : Any ) -> List[str]: '''simple docstring''' # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU __snake_case : Optional[int] = tempfile.mkdtemp() __snake_case : Dict = os.path.join(cls.tmpdir , 'default_config.yml' ) write_basic_config(save_location=cls.configPath ) __snake_case : List[Any] = ['accelerate', 'launch', '--config_file', cls.configPath] @classmethod def A_ ( cls : List[str] ) -> List[str]: '''simple docstring''' shutil.rmtree(cls.tmpdir ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py --model_name_or_path distilbert-base-uncased --output_dir {tmp_dir} --train_file ./tests/fixtures/tests_samples/MRPC/train.csv --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --learning_rate=1e-4 --seed=42 --checkpointing_steps epoch --with_tracking '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : List[Any] = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'glue_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : List[Any] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py --model_name_or_path distilgpt2 --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --block_size 128 --per_device_train_batch_size 5 --per_device_eval_batch_size 5 --num_train_epochs 2 --output_dir {tmp_dir} --checkpointing_steps epoch --with_tracking '''.split() if torch.cuda.device_count() > 1: # Skipping because there are not enough batches to train the model + would need a drop_last to work. return run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertLess(result['perplexity'] , 100 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'clm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : int = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py --model_name_or_path distilroberta-base --train_file ./tests/fixtures/sample_text.txt --validation_file ./tests/fixtures/sample_text.txt --output_dir {tmp_dir} --num_train_epochs=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertLess(result['perplexity'] , 42 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'mlm_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu __snake_case : Any = 7 if get_gpu_count() > 1 else 2 __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/conll/sample.json --validation_file tests/fixtures/tests_samples/conll/sample.json --output_dir {tmp_dir} --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=2 --num_train_epochs={epochs} --seed 7 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.7_5 ) self.assertLess(result['train_loss'] , 0.5 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'ner_no_trainer' ) ) ) @unittest.skip(reason='Fix me @muellerzr' ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Any = self.get_auto_remove_tmp_dir() __snake_case : Tuple = f''' {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py --model_name_or_path bert-base-uncased --version_2_with_negative --train_file tests/fixtures/tests_samples/SQUAD/sample.json --validation_file tests/fixtures/tests_samples/SQUAD/sample.json --output_dir {tmp_dir} --seed=42 --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics. self.assertGreaterEqual(result['eval_f1'] , 28 ) self.assertGreaterEqual(result['eval_exact'] , 28 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'qa_no_trainer' ) ) ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : str = self.get_auto_remove_tmp_dir() __snake_case : Any = f''' {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py --model_name_or_path bert-base-uncased --train_file tests/fixtures/tests_samples/swag/sample.json --validation_file tests/fixtures/tests_samples/swag/sample.json --output_dir {tmp_dir} --max_train_steps=20 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : str = get_results(__a ) self.assertGreaterEqual(result['eval_accuracy'] , 0.8 ) self.assertTrue(os.path.exists(os.path.join(__a , 'swag_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : List[str] = f''' {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py --model_name_or_path t5-small --train_file tests/fixtures/tests_samples/xsum/sample.json --validation_file tests/fixtures/tests_samples/xsum/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : int = get_results(__a ) self.assertGreaterEqual(result['eval_rouge1'] , 10 ) self.assertGreaterEqual(result['eval_rouge2'] , 2 ) self.assertGreaterEqual(result['eval_rougeL'] , 7 ) self.assertGreaterEqual(result['eval_rougeLsum'] , 7 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'summarization_no_trainer' ) ) ) @slow @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Union[str, Any] ) -> int: '''simple docstring''' __snake_case : Tuple = self.get_auto_remove_tmp_dir() __snake_case : str = f''' {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py --model_name_or_path sshleifer/student_marian_en_ro_6_1 --source_lang en --target_lang ro --train_file tests/fixtures/tests_samples/wmt16/sample.json --validation_file tests/fixtures/tests_samples/wmt16/sample.json --output_dir {tmp_dir} --max_train_steps=50 --num_warmup_steps=8 --num_beams=6 --learning_rate=3e-3 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --source_lang en_XX --target_lang ro_RO --checkpointing_steps epoch --with_tracking '''.split() run_command(self._launch_args + testargs ) __snake_case : Dict = get_results(__a ) self.assertGreaterEqual(result['eval_bleu'] , 30 ) self.assertTrue(os.path.exists(os.path.join(__a , 'epoch_0' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'translation_no_trainer' ) ) ) @slow def A_ ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Union[str, Any] = logging.StreamHandler(sys.stdout ) logger.addHandler(__a ) __snake_case : List[str] = self.get_auto_remove_tmp_dir() __snake_case : int = f''' {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py --dataset_name huggingface/semantic-segmentation-test-sample --output_dir {tmp_dir} --max_train_steps=10 --num_warmup_steps=2 --learning_rate=2e-4 --per_device_train_batch_size=2 --per_device_eval_batch_size=1 --checkpointing_steps epoch '''.split() run_command(self._launch_args + testargs ) __snake_case : List[str] = get_results(__a ) self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.1_0 ) @mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} ) def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Dict = self.get_auto_remove_tmp_dir() __snake_case : Dict = f''' {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py --model_name_or_path google/vit-base-patch16-224-in21k --dataset_name hf-internal-testing/cats_vs_dogs_sample --learning_rate 1e-4 --per_device_train_batch_size 2 --per_device_eval_batch_size 1 --max_train_steps 2 --train_val_split 0.1 --seed 42 --output_dir {tmp_dir} --with_tracking --checkpointing_steps 1 '''.split() if is_cuda_and_apex_available(): testargs.append('--fp16' ) run_command(self._launch_args + testargs ) __snake_case : Optional[int] = get_results(__a ) # The base model scores a 25% self.assertGreaterEqual(result['eval_accuracy'] , 0.6 ) self.assertTrue(os.path.exists(os.path.join(__a , 'step_1' ) ) ) self.assertTrue(os.path.exists(os.path.join(__a , 'image_classification_no_trainer' ) ) )
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int: return x if y == 0 else greatest_common_divisor(_UpperCAmelCase ,x % y ) def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : int ) -> int: return (x * y) // greatest_common_divisor(_UpperCAmelCase ,_UpperCAmelCase ) def a_ ( _UpperCAmelCase : int = 20 ) -> int: __snake_case : Optional[Any] = 1 for i in range(1 ,n + 1 ): __snake_case : Optional[int] = lcm(_UpperCAmelCase ,_UpperCAmelCase ) return g if __name__ == "__main__": print(F"""{solution() = }""")
0
'''simple docstring''' import math def a_ ( _UpperCAmelCase : int ) -> list: __snake_case : Optional[Any] = [True] * n __snake_case : Optional[int] = False __snake_case : Dict = False __snake_case : List[Any] = True for i in range(3 ,int(n**0.5 + 1 ) ,2 ): __snake_case : Optional[int] = i * 2 while index < n: __snake_case : Union[str, Any] = False __snake_case : int = index + i __snake_case : Dict = [2] for i in range(3 ,_UpperCAmelCase ,2 ): if is_prime[i]: primes.append(_UpperCAmelCase ) return primes def a_ ( _UpperCAmelCase : int = 99_99_66_66_33_33 ) -> int: __snake_case : List[Any] = math.floor(math.sqrt(_UpperCAmelCase ) ) + 1_00 __snake_case : Tuple = prime_sieve(_UpperCAmelCase ) __snake_case : List[Any] = 0 __snake_case : List[Any] = 0 __snake_case : Optional[int] = primes[prime_index] while (last_prime**2) <= limit: __snake_case : Optional[int] = primes[prime_index + 1] __snake_case : Union[str, Any] = last_prime**2 __snake_case : Dict = next_prime**2 # Get numbers divisible by lps(current) __snake_case : Optional[Any] = lower_bound + last_prime while upper_bound > current <= limit: matches_sum += current current += last_prime # Reset the upper_bound while (upper_bound - next_prime) > limit: upper_bound -= next_prime # Add the numbers divisible by ups(current) __snake_case : Optional[Any] = upper_bound - next_prime while current > lower_bound: matches_sum += current current -= next_prime # Remove the numbers divisible by both ups and lps __snake_case : List[str] = 0 while upper_bound > current <= limit: if current <= lower_bound: # Increment the current number current += last_prime * next_prime continue if current > limit: break # Remove twice since it was added by both ups and lps matches_sum -= current * 2 # Increment the current number current += last_prime * next_prime # Setup for next pair __snake_case : Dict = next_prime prime_index += 1 return matches_sum if __name__ == "__main__": print(solution())
0
1
'''simple docstring''' import unittest from transformers import load_tool from .test_tools_common import ToolTesterMixin A__ : Optional[int] = ''' Hugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning. In March 2021, Hugging Face raised $40 million in a Series B funding round.[3] On April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5] ''' class snake_case__ ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ): def A_ ( self : Dict ) -> Tuple: '''simple docstring''' __snake_case : List[str] = load_tool('text-question-answering' ) self.tool.setup() __snake_case : str = load_tool('text-question-answering' , remote=__a ) def A_ ( self : List[Any] ) -> str: '''simple docstring''' __snake_case : List[Any] = self.tool(__a , 'What did Hugging Face do in April 2021?' ) self.assertEqual(__a , 'launched the BigScience Research Workshop' ) def A_ ( self : int ) -> int: '''simple docstring''' __snake_case : List[str] = self.remote_tool(__a , 'What did Hugging Face do in April 2021?' ) self.assertEqual(__a , 'launched the BigScience Research Workshop' ) def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' __snake_case : List[str] = self.tool(text=__a , question='What did Hugging Face do in April 2021?' ) self.assertEqual(__a , 'launched the BigScience Research Workshop' ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : int = self.remote_tool(text=__a , question='What did Hugging Face do in April 2021?' ) self.assertEqual(__a , 'launched the BigScience Research Workshop' )
0
'''simple docstring''' def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float: return price * (1 + tax_rate) if __name__ == "__main__": print(F"""{price_plus_tax(1_0_0, 0.25) = }""") print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
0
1
'''simple docstring''' import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> Any: # Initialise PyTorch model __snake_case : List[str] = TaConfig.from_json_file(_UpperCAmelCase ) print(f'''Building PyTorch model from configuration: {config}''' ) __snake_case : Optional[Any] = TaForConditionalGeneration(_UpperCAmelCase ) # Load weights from tf checkpoint load_tf_weights_in_ta(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": A__ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) A__ : str = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
0
'''simple docstring''' from tempfile import TemporaryDirectory from unittest import TestCase from unittest.mock import MagicMock, patch from transformers import AutoModel, TFAutoModel from transformers.onnx import FeaturesManager from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch @require_torch @require_tf class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def A_ ( self : List[Any] ) -> int: '''simple docstring''' __snake_case : Optional[int] = SMALL_MODEL_IDENTIFIER __snake_case : str = 'pt' __snake_case : Union[str, Any] = 'tf' def A_ ( self : Dict , __a : Tuple ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = AutoModel.from_pretrained(self.test_model ) model_pt.save_pretrained(__a ) def A_ ( self : Any , __a : Optional[Any] ) -> Dict: '''simple docstring''' __snake_case : Union[str, Any] = TFAutoModel.from_pretrained(self.test_model , from_pt=__a ) model_tf.save_pretrained(__a ) def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Tuple = 'mock_framework' # Framework provided - return whatever the user provides __snake_case : int = FeaturesManager.determine_framework(self.test_model , __a ) self.assertEqual(__a , __a ) # Local checkpoint and framework provided - return provided framework # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__a ) __snake_case : List[Any] = FeaturesManager.determine_framework(__a , __a ) self.assertEqual(__a , __a ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__a ) __snake_case : Tuple = FeaturesManager.determine_framework(__a , __a ) self.assertEqual(__a , __a ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' # PyTorch checkpoint with TemporaryDirectory() as local_pt_ckpt: self._setup_pt_ckpt(__a ) __snake_case : Tuple = FeaturesManager.determine_framework(__a ) self.assertEqual(__a , self.framework_pt ) # TensorFlow checkpoint with TemporaryDirectory() as local_tf_ckpt: self._setup_tf_ckpt(__a ) __snake_case : Union[str, Any] = FeaturesManager.determine_framework(__a ) self.assertEqual(__a , self.framework_tf ) # Invalid local checkpoint with TemporaryDirectory() as local_invalid_ckpt: with self.assertRaises(__a ): __snake_case : Optional[int] = FeaturesManager.determine_framework(__a ) def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Union[str, Any] = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ): __snake_case : int = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_pt ) # PyTorch not in environment -> use TensorFlow __snake_case : Tuple = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_torch_available' , __a ): __snake_case : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_tf ) # Both in environment -> use PyTorch __snake_case : Optional[Any] = MagicMock(return_value=__a ) __snake_case : Tuple = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ), patch( 'transformers.onnx.features.is_torch_available' , __a ): __snake_case : Dict = FeaturesManager.determine_framework(self.test_model ) self.assertEqual(__a , self.framework_pt ) # Both not in environment -> raise error __snake_case : str = MagicMock(return_value=__a ) __snake_case : List[Any] = MagicMock(return_value=__a ) with patch('transformers.onnx.features.is_tf_available' , __a ), patch( 'transformers.onnx.features.is_torch_available' , __a ): with self.assertRaises(__a ): __snake_case : Tuple = FeaturesManager.determine_framework(self.test_model )
0
1
'''simple docstring''' import datasets from .evaluate import evaluate A__ : List[Any] = '''\ @inproceedings{Rajpurkar2016SQuAD10, title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text}, author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang}, booktitle={EMNLP}, year={2016} } ''' A__ : Optional[Any] = ''' This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD). Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. ''' A__ : Any = ''' Computes SQuAD scores (F1 and EM). Args: predictions: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair as given in the references (see below) - \'prediction_text\': the text of the answer references: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair (see above), - \'answers\': a Dict in the SQuAD dataset format { \'text\': list of possible texts for the answer, as a list of strings \'answer_start\': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. Returns: \'exact_match\': Exact match (the normalized answer exactly match the gold answer) \'f1\': The F-score of predicted tokens versus the gold answer Examples: >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}] >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}] >>> squad_metric = datasets.load_metric("squad") >>> results = squad_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 100.0, \'f1\': 100.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ ( datasets.Metric ): def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': {'id': datasets.Value('string' ), 'prediction_text': datasets.Value('string' )}, 'references': { 'id': datasets.Value('string' ), 'answers': datasets.features.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), }, } ) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , ) def A_ ( self : Optional[Any] , __a : List[str] , __a : Optional[int] ) -> Dict: '''simple docstring''' __snake_case : Tuple = {prediction['id']: prediction['prediction_text'] for prediction in predictions} __snake_case : Union[str, Any] = [ { 'paragraphs': [ { 'qas': [ { 'answers': [{'text': answer_text} for answer_text in ref['answers']['text']], 'id': ref['id'], } for ref in references ] } ] } ] __snake_case : Dict = evaluate(dataset=__a , predictions=__a ) return score
0
'''simple docstring''' import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ProphetNetTokenizer A__ = False def A_ ( self : Optional[int] ) -> Dict: '''simple docstring''' super().setUp() __snake_case : Dict = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def A_ ( self : int , __a : Union[str, Any] ) -> List[str]: '''simple docstring''' __snake_case : Optional[int] = 'UNwant\u00E9d,running' __snake_case : List[str] = 'unwanted, running' return input_text, output_text def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Dict = self.tokenizer_class(self.vocab_file ) __snake_case : List[str] = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__a , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [9, 6, 7, 12, 10, 11] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[str] = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def A_ ( self : Union[str, Any] ) -> str: '''simple docstring''' __snake_case : Optional[int] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def A_ ( self : int ) -> Any: '''simple docstring''' __snake_case : int = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Union[str, Any] = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def A_ ( self : List[str] ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = BasicTokenizer(do_lower_case=__a ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Any ) -> List[str]: '''simple docstring''' __snake_case : str = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Union[str, Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[Any] = BasicTokenizer(do_lower_case=__a , strip_accents=__a ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def A_ ( self : Optional[int] ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = BasicTokenizer(do_lower_case=__a , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def A_ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' __snake_case : Any = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] __snake_case : List[Any] = {} for i, token in enumerate(__a ): __snake_case : List[str] = i __snake_case : Any = WordpieceTokenizer(vocab=__a , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def A_ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' __snake_case : Optional[Any] = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __snake_case : str = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] __snake_case : Union[str, Any] = tokenizer(__a , padding=__a , return_tensors='pt' ) self.assertIsInstance(__a , __a ) __snake_case : int = list(batch.input_ids.numpy()[0] ) self.assertListEqual(__a , __a ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def A_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def A_ ( self : List[Any] ) -> int: '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : str = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) __snake_case : Optional[int] = tokenizer.encode('sequence builders' , add_special_tokens=__a ) __snake_case : Optional[int] = tokenizer.encode('multi-sequence build' , add_special_tokens=__a ) __snake_case : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__a ) __snake_case : List[Any] = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
0
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. A__ : List[str] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''} @is_pipeline_test class snake_case__ ( unittest.TestCase ): A__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING A__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: A__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: A__ = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def A_ ( self : Any ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' ) __snake_case : int = text_classifier('This is great !' ) self.assertEqual(nested_simplify(__a ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] ) __snake_case : Tuple = text_classifier('This is great !' , top_k=2 ) self.assertEqual( nested_simplify(__a ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}] ) __snake_case : Any = text_classifier(['This is great !', 'This is bad'] , top_k=2 ) self.assertEqual( nested_simplify(__a ) , [ [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}], [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}], ] , ) __snake_case : List[Any] = text_classifier('This is great !' , top_k=1 ) self.assertEqual(nested_simplify(__a ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] ) # Legacy behavior __snake_case : Optional[Any] = text_classifier('This is great !' , return_all_scores=__a ) self.assertEqual(nested_simplify(__a ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] ) __snake_case : Optional[Any] = text_classifier('This is great !' , return_all_scores=__a ) self.assertEqual( nested_simplify(__a ) , [[{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}]] ) __snake_case : str = text_classifier(['This is great !', 'Something else'] , return_all_scores=__a ) self.assertEqual( nested_simplify(__a ) , [ [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}], [{'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_1', 'score': 0.4_9_6}], ] , ) __snake_case : Tuple = text_classifier(['This is great !', 'Something else'] , return_all_scores=__a ) self.assertEqual( nested_simplify(__a ) , [ {'label': 'LABEL_0', 'score': 0.5_0_4}, {'label': 'LABEL_0', 'score': 0.5_0_4}, ] , ) @require_torch def A_ ( self : Optional[Any] ) -> Optional[int]: '''simple docstring''' import torch __snake_case : Optional[Any] = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , ) __snake_case : Dict = text_classifier('This is great !' ) self.assertEqual(nested_simplify(__a ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] ) @require_tf def A_ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' __snake_case : Dict = pipeline( task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' ) __snake_case : Dict = text_classifier('This is great !' ) self.assertEqual(nested_simplify(__a ) , [{'label': 'LABEL_0', 'score': 0.5_0_4}] ) @slow @require_torch def A_ ( self : Dict ) -> Union[str, Any]: '''simple docstring''' __snake_case : int = pipeline('text-classification' ) __snake_case : Optional[Any] = text_classifier('This is great !' ) self.assertEqual(nested_simplify(__a ) , [{'label': 'POSITIVE', 'score': 1.0}] ) __snake_case : List[str] = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(__a ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) __snake_case : Optional[Any] = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(__a ) , [{'label': 'POSITIVE', 'score': 0.9_8_8}] ) @slow @require_tf def A_ ( self : Dict ) -> Dict: '''simple docstring''' __snake_case : Optional[Any] = pipeline('text-classification' , framework='tf' ) __snake_case : Dict = text_classifier('This is great !' ) self.assertEqual(nested_simplify(__a ) , [{'label': 'POSITIVE', 'score': 1.0}] ) __snake_case : List[str] = text_classifier('This is bad !' ) self.assertEqual(nested_simplify(__a ) , [{'label': 'NEGATIVE', 'score': 1.0}] ) __snake_case : List[Any] = text_classifier('Birds are a type of animal' ) self.assertEqual(nested_simplify(__a ) , [{'label': 'POSITIVE', 'score': 0.9_8_8}] ) def A_ ( self : str , __a : str , __a : Tuple , __a : int ) -> Any: '''simple docstring''' __snake_case : List[str] = TextClassificationPipeline(model=__a , tokenizer=__a ) return text_classifier, ["HuggingFace is in", "This is another test"] def A_ ( self : Any , __a : List[str] , __a : Dict ) -> Tuple: '''simple docstring''' __snake_case : Optional[int] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 __snake_case : Union[str, Any] = 'HuggingFace is in' __snake_case : Tuple = text_classifier(__a ) self.assertEqual(nested_simplify(__a ) , [{'label': ANY(__a ), 'score': ANY(__a )}] ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) __snake_case : int = ['HuggingFace is in ', 'Paris is in France'] __snake_case : Dict = text_classifier(__a ) self.assertEqual( nested_simplify(__a ) , [{'label': ANY(__a ), 'score': ANY(__a )}, {'label': ANY(__a ), 'score': ANY(__a )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() ) self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format __snake_case : Tuple = text_classifier(__a , top_k=__a ) __snake_case : Optional[int] = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__a ) , [[{'label': ANY(__a ), 'score': ANY(__a )}] * N, [{'label': ANY(__a ), 'score': ANY(__a )}] * N] , ) __snake_case : str = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'} __snake_case : Optional[int] = text_classifier(__a ) self.assertEqual( nested_simplify(__a ) , {'label': ANY(__a ), 'score': ANY(__a )} , ) self.assertTrue(outputs['label'] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. __snake_case : Optional[Any] = [['HuggingFace is in ', 'Paris is in France']] with self.assertRaises(__a ): text_classifier(__a ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility __snake_case : Tuple = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] ) self.assertEqual( nested_simplify(__a ) , [{'label': ANY(__a ), 'score': ANY(__a )}] , ) self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available A__ : Optional[Any] = { '''configuration_nllb_moe''': [ '''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NllbMoeConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ '''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''NllbMoeForConditionalGeneration''', '''NllbMoeModel''', '''NllbMoePreTrainedModel''', '''NllbMoeTop2Router''', '''NllbMoeSparseMLP''', ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys A__ : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
0
1
'''simple docstring''' import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class snake_case__ ( unittest.TestCase ): def __init__( self : Union[str, Any] , __a : Any , __a : int=13 , __a : Dict=7 , __a : Union[str, Any]=True , __a : Optional[Any]=True , __a : List[Any]=True , __a : Tuple=True , __a : Union[str, Any]=99 , __a : Dict=32 , __a : Dict=5 , __a : str=4 , __a : Optional[int]=37 , __a : str="gelu" , __a : Any=0.1 , __a : Optional[int]=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[int]=2 , __a : str=0.0_2 , __a : Union[str, Any]=4 , ) -> List[str]: '''simple docstring''' __snake_case : List[Any] = parent __snake_case : Optional[int] = batch_size __snake_case : List[Any] = seq_length __snake_case : int = is_training __snake_case : int = use_attention_mask __snake_case : List[Any] = use_token_type_ids __snake_case : Tuple = use_labels __snake_case : Dict = vocab_size __snake_case : Tuple = hidden_size __snake_case : List[str] = num_hidden_layers __snake_case : int = num_attention_heads __snake_case : Union[str, Any] = intermediate_size __snake_case : str = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : Optional[int] = attention_probs_dropout_prob __snake_case : List[Any] = max_position_embeddings __snake_case : Union[str, Any] = type_vocab_size __snake_case : str = type_sequence_label_size __snake_case : Tuple = initializer_range __snake_case : Optional[Any] = num_choices def A_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' __snake_case : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : Optional[int] = None if self.use_attention_mask: __snake_case : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : Optional[int] = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=__a , ) return config, input_ids, attention_mask def A_ ( self : Any ) -> Tuple: '''simple docstring''' __snake_case : Tuple = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Optional[int] = config_and_inputs __snake_case : List[str] = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict @require_flax class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : Optional[int] = FlaxDistilBertModelTester(self ) @slow def A_ ( self : Tuple ) -> Union[str, Any]: '''simple docstring''' for model_class_name in self.all_model_classes: __snake_case : Optional[int] = model_class_name.from_pretrained('distilbert-base-uncased' ) __snake_case : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__a ) @require_flax class snake_case__ ( unittest.TestCase ): @slow def A_ ( self : Any ) -> Dict: '''simple docstring''' __snake_case : Optional[int] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' ) __snake_case : Tuple = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __snake_case : Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __snake_case : str = model(__a , attention_mask=__a )[0] __snake_case : List[str] = (1, 11, 768) self.assertEqual(output.shape , __a ) __snake_case : Dict = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) )
0
'''simple docstring''' def a_ ( _UpperCAmelCase : int ) -> list: # bit count represents no. of bits in the gray code if bit_count < 0: raise ValueError('The given input must be positive' ) # get the generated string sequence __snake_case : Optional[Any] = gray_code_sequence_string(_UpperCAmelCase ) # # convert them to integers for i in range(len(_UpperCAmelCase ) ): __snake_case : Optional[Any] = int(sequence[i] ,2 ) return sequence def a_ ( _UpperCAmelCase : int ) -> list: # The approach is a recursive one # Base case achieved when either n = 0 or n=1 if bit_count == 0: return ["0"] if bit_count == 1: return ["0", "1"] __snake_case : Dict = 1 << bit_count # defines the length of the sequence # 1<< n is equivalent to 2^n # recursive answer will generate answer for n-1 bits __snake_case : Dict = gray_code_sequence_string(bit_count - 1 ) __snake_case : Any = [] # append 0 to first half of the smaller sequence generated for i in range(seq_len // 2 ): __snake_case : str = '0' + smaller_sequence[i] sequence.append(_UpperCAmelCase ) # append 1 to second half ... start from the end of the list for i in reversed(range(seq_len // 2 ) ): __snake_case : Any = '1' + smaller_sequence[i] sequence.append(_UpperCAmelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod()
0
1
'''simple docstring''' import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger A__ : List[str] = get_logger(__name__) A__ : int = Path(__file__).parent / '''model_card_template.md''' A__ : str = uuida().hex A__ : Optional[Any] = os.getenv('''HF_HUB_OFFLINE''', '''''').upper() in ENV_VARS_TRUE_VALUES A__ : Optional[int] = os.getenv('''DISABLE_TELEMETRY''', '''''').upper() in ENV_VARS_TRUE_VALUES A__ : Any = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '''/api/telemetry/''' def a_ ( _UpperCAmelCase : Union[Dict, str, None] = None ) -> str: __snake_case : List[str] = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get('DIFFUSERS_IS_CI' ,'' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(_UpperCAmelCase ,_UpperCAmelCase ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ): ua += "; " + user_agent return ua def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ,_UpperCAmelCase : Optional[str] = None ) -> str: if token is None: __snake_case : Optional[int] = HfFolder.get_token() if organization is None: __snake_case : Union[str, Any] = whoami(_UpperCAmelCase )['name'] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def a_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : List[Any] ) -> int: if not is_jinja_available(): raise ValueError( 'Modelcard rendering is based on Jinja templates.' ' Please make sure to have `jinja` installed before using `create_model_card`.' ' To install it, please run `pip install Jinja2`.' ) if hasattr(_UpperCAmelCase ,'local_rank' ) and args.local_rank not in [-1, 0]: return __snake_case : str = args.hub_token if hasattr(_UpperCAmelCase ,'hub_token' ) else None __snake_case : Optional[int] = get_full_repo_name(_UpperCAmelCase ,token=_UpperCAmelCase ) __snake_case : int = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='en' ,license='apache-2.0' ,library_name='diffusers' ,tags=[] ,datasets=args.dataset_name ,metrics=[] ,) ,template_path=_UpperCAmelCase ,model_name=_UpperCAmelCase ,repo_name=_UpperCAmelCase ,dataset_name=args.dataset_name if hasattr(_UpperCAmelCase ,'dataset_name' ) else None ,learning_rate=args.learning_rate ,train_batch_size=args.train_batch_size ,eval_batch_size=args.eval_batch_size ,gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(_UpperCAmelCase ,'gradient_accumulation_steps' ) else None ) ,adam_betaa=args.adam_betaa if hasattr(_UpperCAmelCase ,'adam_beta1' ) else None ,adam_betaa=args.adam_betaa if hasattr(_UpperCAmelCase ,'adam_beta2' ) else None ,adam_weight_decay=args.adam_weight_decay if hasattr(_UpperCAmelCase ,'adam_weight_decay' ) else None ,adam_epsilon=args.adam_epsilon if hasattr(_UpperCAmelCase ,'adam_epsilon' ) else None ,lr_scheduler=args.lr_scheduler if hasattr(_UpperCAmelCase ,'lr_scheduler' ) else None ,lr_warmup_steps=args.lr_warmup_steps if hasattr(_UpperCAmelCase ,'lr_warmup_steps' ) else None ,ema_inv_gamma=args.ema_inv_gamma if hasattr(_UpperCAmelCase ,'ema_inv_gamma' ) else None ,ema_power=args.ema_power if hasattr(_UpperCAmelCase ,'ema_power' ) else None ,ema_max_decay=args.ema_max_decay if hasattr(_UpperCAmelCase ,'ema_max_decay' ) else None ,mixed_precision=args.mixed_precision ,) __snake_case : List[Any] = os.path.join(args.output_dir ,'README.md' ) model_card.save(_UpperCAmelCase ) def a_ ( _UpperCAmelCase : Optional[str] ,_UpperCAmelCase : Optional[str] = None ) -> Dict: if resolved_file is None or commit_hash is not None: return commit_hash __snake_case : List[str] = str(Path(_UpperCAmelCase ).as_posix() ) __snake_case : List[Any] = re.search(r'snapshots/([^/]+)/' ,_UpperCAmelCase ) if search is None: return None __snake_case : str = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(_UpperCAmelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. A__ : Optional[Any] = os.path.expanduser( os.getenv('''HF_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''huggingface''')) ) A__ : List[str] = os.path.join(hf_cache_home, '''diffusers''') def a_ ( _UpperCAmelCase : Optional[str] = None ,_UpperCAmelCase : Optional[str] = None ) -> None: if new_cache_dir is None: __snake_case : Tuple = DIFFUSERS_CACHE if old_cache_dir is None: __snake_case : str = old_diffusers_cache __snake_case : str = Path(_UpperCAmelCase ).expanduser() __snake_case : int = Path(_UpperCAmelCase ).expanduser() for old_blob_path in old_cache_dir.glob('**/blobs/*' ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): __snake_case : Optional[Any] = new_cache_dir / old_blob_path.relative_to(_UpperCAmelCase ) new_blob_path.parent.mkdir(parents=_UpperCAmelCase ,exist_ok=_UpperCAmelCase ) os.replace(_UpperCAmelCase ,_UpperCAmelCase ) try: os.symlink(_UpperCAmelCase ,_UpperCAmelCase ) except OSError: logger.warning( 'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). A__ : str = os.path.join(DIFFUSERS_CACHE, '''version_diffusers_cache.txt''') if not os.path.isfile(cache_version_file): A__ : List[Any] = 0 else: with open(cache_version_file) as f: try: A__ : Tuple = int(f.read()) except ValueError: A__ : List[str] = 0 if cache_version < 1: A__ : Any = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( '''The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ''' '''existing cached models. This is a one-time operation, you can interrupt it or run it ''' '''later by calling `diffusers.utils.hub_utils.move_cache()`.''' ) try: move_cache() except Exception as e: A__ : int = '''\n'''.join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ '''file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ''' '''message and we will do our best to help.''' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, '''w''') as f: f.write('''1''') except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ '''the directory exists and can be written to.''' ) def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ) -> str: if variant is not None: __snake_case : List[Any] = weights_name.split('.' ) __snake_case : Dict = splits[:-1] + [variant] + splits[-1:] __snake_case : Optional[Any] = '.'.join(_UpperCAmelCase ) return weights_name def a_ ( _UpperCAmelCase : Dict ,*, _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : List[str]=None ,) -> Optional[Any]: __snake_case : Optional[Any] = str(_UpperCAmelCase ) if os.path.isfile(_UpperCAmelCase ): return pretrained_model_name_or_path elif os.path.isdir(_UpperCAmelCase ): if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) ): # Load from a PyTorch checkpoint __snake_case : Tuple = os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) ): __snake_case : List[Any] = os.path.join(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(_UpperCAmelCase ).base_version ) >= version.parse('0.20.0' ) ): try: __snake_case : Optional[Any] = hf_hub_download( _UpperCAmelCase ,filename=_add_variant(_UpperCAmelCase ,_UpperCAmelCase ) ,cache_dir=_UpperCAmelCase ,force_download=_UpperCAmelCase ,proxies=_UpperCAmelCase ,resume_download=_UpperCAmelCase ,local_files_only=_UpperCAmelCase ,use_auth_token=_UpperCAmelCase ,user_agent=_UpperCAmelCase ,subfolder=_UpperCAmelCase ,revision=revision or commit_hash ,) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''' ,_UpperCAmelCase ,) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(_UpperCAmelCase ,_UpperCAmelCase )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(_UpperCAmelCase ,_UpperCAmelCase )}\' so that the correct variant file can be added.''' ,_UpperCAmelCase ,) try: # 2. Load model file as usual __snake_case : List[Any] = hf_hub_download( _UpperCAmelCase ,filename=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,force_download=_UpperCAmelCase ,proxies=_UpperCAmelCase ,resume_download=_UpperCAmelCase ,local_files_only=_UpperCAmelCase ,use_auth_token=_UpperCAmelCase ,user_agent=_UpperCAmelCase ,subfolder=_UpperCAmelCase ,revision=revision or commit_hash ,) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' 'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a ' 'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli ' 'login`.' ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' 'this model name. Check the model page at ' f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' ' \nCheckout your internet connection or see how to run the library in' ' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' '\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. ' f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer from transformers.testing_utils import require_tokenizers, require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor @require_tokenizers @require_vision class snake_case__ ( unittest.TestCase ): def A_ ( self : int ) -> List[Any]: '''simple docstring''' __snake_case : Any = tempfile.mkdtemp() # fmt: off __snake_case : List[str] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest'] # fmt: on __snake_case : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) __snake_case : List[str] = { 'do_resize': True, 'size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.5, 0.5, 0.5], 'image_std': [0.5, 0.5, 0.5], } __snake_case : Optional[Any] = os.path.join(self.tmpdirname , __a ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(__a , __a ) def A_ ( self : Optional[int] , **__a : Dict ) -> int: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : int , **__a : Dict ) -> Tuple: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : Optional[int] ) -> Optional[int]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A_ ( self : str ) -> List[str]: '''simple docstring''' __snake_case : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __snake_case : List[str] = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs] return image_inputs def A_ ( self : List[str] ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : Dict = self.get_image_processor() __snake_case : Any = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) processor.save_pretrained(self.tmpdirname ) __snake_case : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : str ) -> Optional[int]: '''simple docstring''' __snake_case : Optional[Any] = VisionTextDualEncoderProcessor( tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __snake_case : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) __snake_case : Tuple = self.get_image_processor(do_normalize=__a , padding_value=1.0 ) __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained( self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__a , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __a ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Tuple = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : str = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = self.prepare_image_inputs() __snake_case : List[str] = image_processor(__a , return_tensors='np' ) __snake_case : List[str] = processor(images=__a , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A_ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : int = self.get_tokenizer() __snake_case : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Optional[int] = 'lower newer' __snake_case : Dict = processor(text=__a ) __snake_case : List[Any] = tokenizer(__a ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A_ ( self : List[Any] ) -> Optional[Any]: '''simple docstring''' __snake_case : Dict = self.get_image_processor() __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : int = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : List[Any] = 'lower newer' __snake_case : Optional[Any] = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with self.assertRaises(__a ): processor() def A_ ( self : Tuple ) -> Any: '''simple docstring''' __snake_case : Union[str, Any] = self.get_image_processor() __snake_case : Any = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __snake_case : int = processor.batch_decode(__a ) __snake_case : Optional[Any] = tokenizer.batch_decode(__a ) self.assertListEqual(__a , __a ) def A_ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = self.get_image_processor() __snake_case : Dict = self.get_tokenizer() __snake_case : Dict = VisionTextDualEncoderProcessor(tokenizer=__a , image_processor=__a ) __snake_case : Union[str, Any] = 'lower newer' __snake_case : Tuple = self.prepare_image_inputs() __snake_case : Union[str, Any] = processor(text=__a , images=__a ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
0
1
'''simple docstring''' import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin A__ : Any = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''') class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = BartphoTokenizer A__ = False A__ = True def A_ ( self : Any ) -> Union[str, Any]: '''simple docstring''' super().setUp() __snake_case : Any = ['▁This', '▁is', '▁a', '▁t', 'est'] __snake_case : Union[str, Any] = dict(zip(__a , range(len(__a ) ) ) ) __snake_case : Tuple = {'unk_token': '<unk>'} __snake_case : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] ) with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp: for token in vocab_tokens: fp.write(f'''{token} {vocab_tokens[token]}\n''' ) __snake_case : int = BartphoTokenizer(__a , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def A_ ( self : Tuple , **__a : Dict ) -> int: '''simple docstring''' kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **__a ) def A_ ( self : Optional[Any] , __a : List[str] ) -> List[str]: '''simple docstring''' __snake_case : Dict = 'This is a là test' __snake_case : Tuple = 'This is a<unk><unk> test' return input_text, output_text def A_ ( self : int ) -> Optional[int]: '''simple docstring''' __snake_case : List[str] = BartphoTokenizer(__a , self.monolingual_vocab_file , **self.special_tokens_map ) __snake_case : Tuple = 'This is a là test' __snake_case : int = '▁This ▁is ▁a ▁l à ▁t est'.split() __snake_case : str = tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) __snake_case : Tuple = tokens + [tokenizer.unk_token] __snake_case : Union[str, Any] = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
0
'''simple docstring''' import argparse import json from collections import OrderedDict import torch from huggingface_hub import cached_download, hf_hub_url from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification def a_ ( _UpperCAmelCase : List[Any] ) -> Tuple: __snake_case : str = [] embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''', f'''stage{idx}.patch_embed.proj.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''', f'''stage{idx}.patch_embed.proj.bias''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''', f'''stage{idx}.patch_embed.norm.weight''', ) ) embed.append( ( f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''', f'''stage{idx}.patch_embed.norm.bias''', ) ) return embed def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[int] ) -> List[str]: __snake_case : Tuple = [] attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''', f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.attn.proj.weight''', ) ) attention_weights.append( ( f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.attn.proj.bias''', ) ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') ) attention_weights.append( (f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') ) return attention_weights def a_ ( _UpperCAmelCase : Union[str, Any] ) -> Dict: __snake_case : Union[str, Any] = [] token.append((f'''cvt.encoder.stages.{idx}.cls_token''', 'stage2.cls_token') ) return token def a_ ( ) -> Optional[Any]: __snake_case : Any = [] head.append(('layernorm.weight', 'norm.weight') ) head.append(('layernorm.bias', 'norm.bias') ) head.append(('classifier.weight', 'head.weight') ) head.append(('classifier.bias', 'head.bias') ) return head def a_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ) -> Tuple: __snake_case : List[str] = 'imagenet-1k-id2label.json' __snake_case : Dict = 10_00 __snake_case : Union[str, Any] = 'huggingface/label-files' __snake_case : str = num_labels __snake_case : str = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase ,_UpperCAmelCase ,repo_type='dataset' ) ) ,'r' ) ) __snake_case : Tuple = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __snake_case : Optional[Any] = idalabel __snake_case : str = {v: k for k, v in idalabel.items()} __snake_case : Dict = CvtConfig(num_labels=_UpperCAmelCase ,idalabel=_UpperCAmelCase ,labelaid=_UpperCAmelCase ) # For depth size 13 (13 = 1+2+10) if cvt_model.rsplit('/' ,1 )[-1][4:6] == "13": __snake_case : Tuple = [1, 2, 10] # For depth size 21 (21 = 1+4+16) elif cvt_model.rsplit('/' ,1 )[-1][4:6] == "21": __snake_case : str = [1, 4, 16] # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20) else: __snake_case : Dict = [2, 2, 20] __snake_case : Any = [3, 12, 16] __snake_case : Tuple = [1_92, 7_68, 10_24] __snake_case : str = CvtForImageClassification(_UpperCAmelCase ) __snake_case : List[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) __snake_case : int = image_size __snake_case : int = torch.load(_UpperCAmelCase ,map_location=torch.device('cpu' ) ) __snake_case : List[Any] = OrderedDict() __snake_case : Union[str, Any] = [] for idx in range(len(config.depth ) ): if config.cls_token[idx]: __snake_case : Optional[Any] = list_of_state_dict + cls_token(_UpperCAmelCase ) __snake_case : Tuple = list_of_state_dict + embeddings(_UpperCAmelCase ) for cnt in range(config.depth[idx] ): __snake_case : Optional[int] = list_of_state_dict + attention(_UpperCAmelCase ,_UpperCAmelCase ) __snake_case : str = list_of_state_dict + final() for gg in list_of_state_dict: print(_UpperCAmelCase ) for i in range(len(_UpperCAmelCase ) ): __snake_case : List[str] = original_weights[list_of_state_dict[i][1]] model.load_state_dict(_UpperCAmelCase ) model.save_pretrained(_UpperCAmelCase ) image_processor.save_pretrained(_UpperCAmelCase ) # Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al if __name__ == "__main__": A__ : Dict = argparse.ArgumentParser() parser.add_argument( '''--cvt_model''', default='''cvt-w24''', type=str, help='''Name of the cvt model you\'d like to convert.''', ) parser.add_argument( '''--image_size''', default=3_8_4, type=int, help='''Input Image Size''', ) parser.add_argument( '''--cvt_file_name''', default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''', type=str, help='''Input Image Size''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) A__ : Tuple = parser.parse_args() convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
0
1
'''simple docstring''' import multiprocessing import os from typing import BinaryIO, Optional, Union import fsspec from .. import Dataset, Features, NamedSplit, config from ..formatting import query_table from ..packaged_modules.json.json import Json from ..utils import logging from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class snake_case__ ( SCREAMING_SNAKE_CASE_ ): def __init__( self : Optional[Any] , __a : NestedDataStructureLike[PathLike] , __a : Optional[NamedSplit] = None , __a : Optional[Features] = None , __a : str = None , __a : bool = False , __a : bool = False , __a : Optional[str] = None , __a : Optional[int] = None , **__a : Dict , ) -> Any: '''simple docstring''' super().__init__( __a , split=__a , features=__a , cache_dir=__a , keep_in_memory=__a , streaming=__a , num_proc=__a , **__a , ) __snake_case : List[str] = field __snake_case : Optional[int] = path_or_paths if isinstance(__a , __a ) else {self.split: path_or_paths} __snake_case : List[str] = Json( cache_dir=__a , data_files=__a , features=__a , field=__a , **__a , ) def A_ ( self : str ) -> List[str]: '''simple docstring''' # Build iterable dataset if self.streaming: __snake_case : Union[str, Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: __snake_case : str = None __snake_case : str = None __snake_case : int = None __snake_case : List[str] = None self.builder.download_and_prepare( download_config=__a , download_mode=__a , verification_mode=__a , base_path=__a , num_proc=self.num_proc , ) __snake_case : Union[str, Any] = self.builder.as_dataset( split=self.split , verification_mode=__a , in_memory=self.keep_in_memory ) return dataset class snake_case__ : def __init__( self : List[str] , __a : Dataset , __a : Union[PathLike, BinaryIO] , __a : Optional[int] = None , __a : Optional[int] = None , **__a : Tuple , ) -> List[str]: '''simple docstring''' if num_proc is not None and num_proc <= 0: raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' ) __snake_case : Tuple = dataset __snake_case : Union[str, Any] = path_or_buf __snake_case : int = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE __snake_case : int = num_proc __snake_case : List[Any] = 'utf-8' __snake_case : List[Any] = to_json_kwargs def A_ ( self : str ) -> int: '''simple docstring''' __snake_case : List[Any] = self.to_json_kwargs.pop('path_or_buf' , __a ) __snake_case : Tuple = self.to_json_kwargs.pop('orient' , 'records' ) __snake_case : Dict = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False ) __snake_case : Optional[int] = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True ) __snake_case : Union[str, Any] = self.to_json_kwargs.pop('compression' , __a ) if compression not in [None, "infer", "gzip", "bz2", "xz"]: raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' ) if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ): with fsspec.open(self.path_or_buf , 'wb' , compression=__a ) as buffer: __snake_case : Optional[int] = self._write(file_obj=__a , orient=__a , lines=__a , index=__a , **self.to_json_kwargs ) else: if compression: raise NotImplementedError( f'''The compression parameter is not supported when writing to a buffer, but compression={compression}''' ' was passed. Please provide a local path instead.' ) __snake_case : List[Any] = self._write( file_obj=self.path_or_buf , orient=__a , lines=__a , index=__a , **self.to_json_kwargs ) return written def A_ ( self : Optional[Any] , __a : str ) -> List[Any]: '''simple docstring''' __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = args __snake_case : Optional[int] = query_table( table=self.dataset.data , key=slice(__a , offset + self.batch_size ) , indices=self.dataset._indices , ) __snake_case : Optional[int] = batch.to_pandas().to_json( path_or_buf=__a , orient=__a , lines=__a , index=__a , **__a ) if not json_str.endswith('\n' ): json_str += "\n" return json_str.encode(self.encoding ) def A_ ( self : str , __a : BinaryIO , __a : Union[str, Any] , __a : Union[str, Any] , __a : Tuple , **__a : Optional[int] , ) -> int: '''simple docstring''' __snake_case : int = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ): __snake_case : Dict = self._batch_json((offset, orient, lines, index, to_json_kwargs) ) written += file_obj.write(__a ) else: __snake_case , __snake_case : Union[str, Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for json_str in logging.tqdm( pool.imap( self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , __a , __a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ): written += file_obj.write(__a ) return written
0
'''simple docstring''' from __future__ import annotations A__ : List[Any] = list[list[int]] # assigning initial values to the grid A__ : Matrix = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution A__ : Matrix = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def a_ ( _UpperCAmelCase : Matrix ,_UpperCAmelCase : int ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def a_ ( _UpperCAmelCase : Matrix ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def a_ ( _UpperCAmelCase : Matrix ) -> Matrix | None: if location := find_empty_location(_UpperCAmelCase ): __snake_case , __snake_case : Optional[int] = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 ,10 ): if is_safe(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ): __snake_case : Union[str, Any] = digit if sudoku(_UpperCAmelCase ) is not None: return grid __snake_case : Optional[Any] = 0 return None def a_ ( _UpperCAmelCase : Matrix ) -> None: for row in grid: for cell in row: print(_UpperCAmelCase ,end=' ' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 2_0) print_solution(example_grid) print('''\nExample grid solution:''') A__ : List[str] = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
0
1
'''simple docstring''' def a_ ( _UpperCAmelCase : float ,_UpperCAmelCase : float ) -> float: return price * (1 + tax_rate) if __name__ == "__main__": print(F"""{price_plus_tax(1_0_0, 0.25) = }""") print(F"""{price_plus_tax(1_25.50, 0.05) = }""")
0
'''simple docstring''' import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ): A__ = KandinskyVaaPriorPipeline A__ = ['''prompt'''] A__ = ['''prompt''', '''negative_prompt'''] A__ = [ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] A__ = False @property def A_ ( self : Dict ) -> List[str]: '''simple docstring''' return 32 @property def A_ ( self : Any ) -> str: '''simple docstring''' return 32 @property def A_ ( self : str ) -> Optional[int]: '''simple docstring''' return self.time_input_dim @property def A_ ( self : str ) -> int: '''simple docstring''' return self.time_input_dim * 4 @property def A_ ( self : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' return 100 @property def A_ ( self : Tuple ) -> List[str]: '''simple docstring''' __snake_case : Tuple = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) return tokenizer @property def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Union[str, Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(__a ) @property def A_ ( self : Union[str, Any] ) -> Any: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Any = { 'num_attention_heads': 2, 'attention_head_dim': 12, 'embedding_dim': self.text_embedder_hidden_size, 'num_layers': 1, } __snake_case : List[Any] = PriorTransformer(**__a ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 __snake_case : Any = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def A_ ( self : List[str] ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __snake_case : Optional[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , ) __snake_case : Optional[Any] = CLIPVisionModelWithProjection(__a ) return model @property def A_ ( self : Dict ) -> List[Any]: '''simple docstring''' __snake_case : Dict = CLIPImageProcessor( crop_size=224 , do_center_crop=__a , do_normalize=__a , do_resize=__a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , ) return image_processor def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : Tuple = self.dummy_prior __snake_case : List[str] = self.dummy_image_encoder __snake_case : str = self.dummy_text_encoder __snake_case : List[str] = self.dummy_tokenizer __snake_case : List[str] = self.dummy_image_processor __snake_case : Any = UnCLIPScheduler( variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=__a , clip_sample_range=1_0.0 , ) __snake_case : str = { 'prior': prior, 'image_encoder': image_encoder, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'scheduler': scheduler, 'image_processor': image_processor, } return components def A_ ( self : List[Any] , __a : Optional[Any] , __a : Tuple=0 ) -> Any: '''simple docstring''' if str(__a ).startswith('mps' ): __snake_case : List[str] = torch.manual_seed(__a ) else: __snake_case : List[str] = torch.Generator(device=__a ).manual_seed(__a ) __snake_case : List[Any] = { 'prompt': 'horse', 'generator': generator, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def A_ ( self : str ) -> Dict: '''simple docstring''' __snake_case : str = 'cpu' __snake_case : List[str] = self.get_dummy_components() __snake_case : Tuple = self.pipeline_class(**__a ) __snake_case : Optional[Any] = pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) __snake_case : Optional[int] = pipe(**self.get_dummy_inputs(__a ) ) __snake_case : List[str] = output.image_embeds __snake_case : str = pipe( **self.get_dummy_inputs(__a ) , return_dict=__a , )[0] __snake_case : Union[str, Any] = image[0, -10:] __snake_case : Any = image_from_tuple[0, -10:] assert image.shape == (1, 32) __snake_case : List[Any] = np.array( [-0.0_5_3_2, 1.7_1_2_0, 0.3_6_5_6, -1.0_8_5_2, -0.8_9_4_6, -1.1_7_5_6, 0.4_3_4_8, 0.2_4_8_2, 0.5_1_4_6, -0.1_1_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @skip_mps def A_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = torch_device == 'cpu' __snake_case : Dict = True __snake_case : Union[str, Any] = False self._test_inference_batch_single_identical( test_max_difference=__a , relax_max_difference=__a , test_mean_pixel_difference=__a , ) @skip_mps def A_ ( self : str ) -> Union[str, Any]: '''simple docstring''' __snake_case : Dict = torch_device == 'cpu' __snake_case : Optional[Any] = False self._test_attention_slicing_forward_pass( test_max_difference=__a , test_mean_pixel_difference=__a , )
0
1