code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 296 |
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def lowercase__ ( A_: Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
__UpperCAmelCase =k.replace(A_ , A_ )
if k.startswith("""encoder""" ):
__UpperCAmelCase =k.replace(""".attn""" , """.self_attn""" )
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """final_layer_norm""" )
elif k.startswith("""decoder""" ):
__UpperCAmelCase =k.replace("""norm1""" , """self_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm2""" , """encoder_attn_layer_norm""" )
__UpperCAmelCase =k.replace("""norm3""" , """final_layer_norm""" )
return k
def lowercase__ ( A_: Tuple ) -> str:
"""simple docstring"""
__UpperCAmelCase =[
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
__UpperCAmelCase =sd.pop(A_ )
__UpperCAmelCase =k.replace("""layernorm_embedding""" , """layer_norm""" )
assert new_k not in sd
__UpperCAmelCase =v
__A = ["START"]
@torch.no_grad()
def lowercase__ ( A_: List[Any] , A_: str , A_: int ) -> Optional[int]:
"""simple docstring"""
__UpperCAmelCase =torch.load(A_ , map_location="""cpu""" )
__UpperCAmelCase =model["""model"""]
__UpperCAmelCase =BlenderbotConfig.from_json_file(A_ )
__UpperCAmelCase =BlenderbotForConditionalGeneration(A_ )
__UpperCAmelCase =m.model.state_dict().keys()
__UpperCAmelCase =[]
__UpperCAmelCase ={}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
__UpperCAmelCase =rename_state_dict_key(A_ )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
__UpperCAmelCase =v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(A_ )
m.model.load_state_dict(A_ , strict=A_ )
m.half()
m.save_pretrained(A_ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__A = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 68 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __A ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = """speech_to_text"""
UpperCAmelCase__ = ["""past_key_values"""]
UpperCAmelCase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , a__=1_0000 , a__=12 , a__=2048 , a__=4 , a__=6 , a__=2048 , a__=4 , a__=0.0 , a__=0.0 , a__=True , a__=True , a__="relu" , a__=256 , a__=0.1 , a__=0.0 , a__=0.0 , a__=0.02 , a__=2 , a__=True , a__=1 , a__=0 , a__=2 , a__=6000 , a__=1024 , a__=2 , a__=(5, 5) , a__=1024 , a__=80 , a__=1 , **a__ , ):
"""simple docstring"""
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Optional[int] = d_model
_lowerCamelCase : List[str] = encoder_ffn_dim
_lowerCamelCase : Any = encoder_layers
_lowerCamelCase : Union[str, Any] = encoder_attention_heads
_lowerCamelCase : Dict = decoder_ffn_dim
_lowerCamelCase : List[Any] = decoder_layers
_lowerCamelCase : Optional[int] = decoder_attention_heads
_lowerCamelCase : Dict = dropout
_lowerCamelCase : List[Any] = attention_dropout
_lowerCamelCase : Dict = activation_dropout
_lowerCamelCase : Dict = activation_function
_lowerCamelCase : Tuple = init_std
_lowerCamelCase : List[str] = encoder_layerdrop
_lowerCamelCase : Tuple = decoder_layerdrop
_lowerCamelCase : str = use_cache
_lowerCamelCase : Dict = encoder_layers
_lowerCamelCase : int = scale_embedding # scale factor will be sqrt(d_model) if True
_lowerCamelCase : Dict = max_source_positions
_lowerCamelCase : Tuple = max_target_positions
_lowerCamelCase : Dict = num_conv_layers
_lowerCamelCase : List[str] = list(a__)
_lowerCamelCase : str = conv_channels
_lowerCamelCase : Tuple = input_feat_per_channel
_lowerCamelCase : Any = input_channels
if len(self.conv_kernel_sizes) != self.num_conv_layers:
raise ValueError(
'''Configuration for convolutional module is incorrect. '''
'''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
F"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, """
F"""`config.num_conv_layers = {self.num_conv_layers}`.""")
super().__init__(
pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , **a__ , )
| 613 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_lowerCamelCase = logging.get_logger(__name__)
@dataclass
class __A :
"""simple docstring"""
UpperCAmelCase__ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(glue_processors.keys() )} )
UpperCAmelCase__ = field(
metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} )
UpperCAmelCase__ = field(
default=128 ,metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} ,)
UpperCAmelCase__ = field(
default=lowerCamelCase__ ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = self.task_name.lower()
class __A ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = """train"""
UpperCAmelCase__ = """dev"""
UpperCAmelCase__ = """test"""
class __A ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
UpperCAmelCase__ = 42
def __init__( self , a__ , a__ , a__ = None , a__ = Split.train , a__ = None , ):
"""simple docstring"""
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , a__ , )
_lowerCamelCase : Optional[Any] = args
_lowerCamelCase : Tuple = glue_processors[args.task_name]()
_lowerCamelCase : Any = glue_output_modes[args.task_name]
if isinstance(a__ , a__):
try:
_lowerCamelCase : List[Any] = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''')
# Load data features from cache or dataset file
_lowerCamelCase : Tuple = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
_lowerCamelCase : int = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = label_list[2], label_list[1]
_lowerCamelCase : str = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCamelCase : Any = cached_features_file + '''.lock'''
with FileLock(a__):
if os.path.exists(a__) and not args.overwrite_cache:
_lowerCamelCase : Any = time.time()
_lowerCamelCase : Optional[Any] = torch.load(a__)
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
_lowerCamelCase : List[str] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
_lowerCamelCase : str = self.processor.get_test_examples(args.data_dir)
else:
_lowerCamelCase : List[Any] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
_lowerCamelCase : List[Any] = examples[:limit_length]
_lowerCamelCase : List[str] = glue_convert_examples_to_features(
a__ , a__ , max_length=args.max_seq_length , label_list=a__ , output_mode=self.output_mode , )
_lowerCamelCase : int = time.time()
torch.save(self.features , a__)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self):
"""simple docstring"""
return len(self.features)
def __getitem__( self , a__):
"""simple docstring"""
return self.features[i]
def __snake_case ( self):
"""simple docstring"""
return self.label_list
| 613 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( __a ,__a ,__a ,unittest.TestCase ):
__SCREAMING_SNAKE_CASE :Tuple = AltDiffusionPipeline
__SCREAMING_SNAKE_CASE :Optional[Any] = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE :List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE :Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE :Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
def snake_case__ ( self : Tuple ):
torch.manual_seed(0 )
__magic_name__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
__magic_name__ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
__magic_name__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
__magic_name__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
__magic_name__ = CLIPTextModel(a__ )
__magic_name__ = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
__magic_name__ = 77
__magic_name__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def snake_case__ ( self : Dict , a__ : List[str] , a__ : int=0 ):
if str(a__ ).startswith('''mps''' ):
__magic_name__ = torch.manual_seed(a__ )
else:
__magic_name__ = torch.Generator(device=a__ ).manual_seed(a__ )
__magic_name__ = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def snake_case__ ( self : str ):
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def snake_case__ ( self : Dict ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def snake_case__ ( self : Any ):
__magic_name__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.get_dummy_components()
torch.manual_seed(0 )
__magic_name__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
__magic_name__ = RobertaSeriesModelWithTransformation(a__ )
__magic_name__ = text_encoder
__magic_name__ = AltDiffusionPipeline(**a__ )
__magic_name__ = alt_pipe.to(a__ )
alt_pipe.set_progress_bar_config(disable=a__ )
__magic_name__ = self.get_dummy_inputs(a__ )
__magic_name__ = '''A photo of an astronaut'''
__magic_name__ = alt_pipe(**a__ )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array(
[0.5_748_162, 0.60_447_145, 0.48_821_217, 0.50_100_636, 0.5_431_185, 0.45_763_683, 0.49_657_696, 0.48_132_733, 0.47_573_093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case__ ( self : List[str] ):
__magic_name__ = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__magic_name__ = self.get_dummy_components()
__magic_name__ = PNDMScheduler(skip_prk_steps=a__ )
torch.manual_seed(0 )
__magic_name__ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
__magic_name__ = RobertaSeriesModelWithTransformation(a__ )
__magic_name__ = text_encoder
__magic_name__ = AltDiffusionPipeline(**a__ )
__magic_name__ = alt_pipe.to(a__ )
alt_pipe.set_progress_bar_config(disable=a__ )
__magic_name__ = self.get_dummy_inputs(a__ )
__magic_name__ = alt_pipe(**a__ )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__magic_name__ = np.array(
[0.51_605_093, 0.5_707_241, 0.47_365_507, 0.50_578_886, 0.5_633_877, 0.4_642_503, 0.5_182_081, 0.48_763_484, 0.49_084_237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def snake_case__ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self : Optional[int] ):
# make sure here that pndm scheduler skips prk
__magic_name__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=a__ )
__magic_name__ = alt_pipe.to(a__ )
alt_pipe.set_progress_bar_config(disable=a__ )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = alt_pipe([prompt] , generator=a__ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__magic_name__ = np.array([0.1_010, 0.0_800, 0.0_794, 0.0_885, 0.0_843, 0.0_762, 0.0_769, 0.0_729, 0.0_586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case__ ( self : Dict ):
__magic_name__ = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' )
__magic_name__ = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=a__ , safety_checker=a__ )
__magic_name__ = alt_pipe.to(a__ )
alt_pipe.set_progress_bar_config(disable=a__ )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = torch.manual_seed(0 )
__magic_name__ = alt_pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type='''numpy''' )
__magic_name__ = output.images
__magic_name__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__magic_name__ = np.array([0.4_019, 0.4_052, 0.3_810, 0.4_119, 0.3_916, 0.3_982, 0.4_651, 0.4_195, 0.5_323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 432 |
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def UpperCamelCase ( a , a , a ) -> float:
'''simple docstring'''
__magic_name__ = x
__magic_name__ = y
for step in range(a ): # noqa: B007
__magic_name__ = a * a - b * b + x
__magic_name__ = 2 * a * b + y
__magic_name__ = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def UpperCamelCase ( a ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def UpperCamelCase ( a ) -> tuple:
'''simple docstring'''
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(a , 1 , 1 ) )
def UpperCamelCase ( a = 800 , a = 600 , a = -0.6 , a = 0 , a = 3.2 , a = 50 , a = True , ) -> Image.Image:
'''simple docstring'''
__magic_name__ = Image.new('''RGB''' , (image_width, image_height) )
__magic_name__ = img.load()
# loop through the image-coordinates
for image_x in range(a ):
for image_y in range(a ):
# determine the figure-coordinates based on the image-coordinates
__magic_name__ = figure_width / image_width * image_height
__magic_name__ = figure_center_x + (image_x / image_width - 0.5) * figure_width
__magic_name__ = figure_center_y + (image_y / image_height - 0.5) * figure_height
__magic_name__ = get_distance(a , a , a )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
__magic_name__ = get_color_coded_rgb(a )
else:
__magic_name__ = get_black_and_white_rgb(a )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_lowerCAmelCase = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 432 | 1 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
@add_end_docstrings(_UpperCamelCase )
class UpperCAmelCase ( _UpperCamelCase ):
'''simple docstring'''
def __init__( self , *lowercase__ , **lowercase__ ) -> Dict:
super().__init__(*lowercase__ , **lowercase__ )
requires_backends(self , 'decord' )
self.check_model_type(lowercase__ )
def _UpperCamelCase ( self , lowercase__=None , lowercase__=None , lowercase__=None ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE : Any = {}
if frame_sampling_rate is not None:
SCREAMING_SNAKE_CASE : List[Any] = frame_sampling_rate
if num_frames is not None:
SCREAMING_SNAKE_CASE : Tuple = num_frames
SCREAMING_SNAKE_CASE : Optional[Any] = {}
if top_k is not None:
SCREAMING_SNAKE_CASE : str = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , lowercase__ , **lowercase__ ) -> Dict:
return super().__call__(lowercase__ , **lowercase__ )
def _UpperCamelCase ( self , lowercase__ , lowercase__=None , lowercase__=1 ) -> str:
if num_frames is None:
SCREAMING_SNAKE_CASE : Dict = self.model.config.num_frames
if video.startswith('http://' ) or video.startswith('https://' ):
SCREAMING_SNAKE_CASE : Optional[int] = BytesIO(requests.get(lowercase__ ).content )
SCREAMING_SNAKE_CASE : List[str] = VideoReader(lowercase__ )
videoreader.seek(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
SCREAMING_SNAKE_CASE : int = num_frames * frame_sampling_rate - 1
SCREAMING_SNAKE_CASE : Optional[int] = np.linspace(lowercase__ , lowercase__ , num=lowercase__ , dtype=np.intaa )
SCREAMING_SNAKE_CASE : str = videoreader.get_batch(lowercase__ ).asnumpy()
SCREAMING_SNAKE_CASE : Dict = list(lowercase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(lowercase__ , return_tensors=self.framework )
return model_inputs
def _UpperCamelCase ( self , lowercase__ ) -> str:
SCREAMING_SNAKE_CASE : Any = self.model(**lowercase__ )
return model_outputs
def _UpperCamelCase ( self , lowercase__ , lowercase__=5 ) -> Tuple:
if top_k > self.model.config.num_labels:
SCREAMING_SNAKE_CASE : List[Any] = self.model.config.num_labels
if self.framework == "pt":
SCREAMING_SNAKE_CASE : Union[str, Any] = model_outputs.logits.softmax(-1 )[0]
SCREAMING_SNAKE_CASE : Union[str, Any] = probs.topk(lowercase__ )
else:
raise ValueError(F"""Unsupported framework: {self.framework}""" )
SCREAMING_SNAKE_CASE : Tuple = scores.tolist()
SCREAMING_SNAKE_CASE : Union[str, Any] = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(lowercase__ , lowercase__ )]
| 715 | '''simple docstring'''
def __lowerCAmelCase ( a_ = 1000 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = 2**power
SCREAMING_SNAKE_CASE : Any = str(a_ )
SCREAMING_SNAKE_CASE : int = list(a_ )
SCREAMING_SNAKE_CASE : Any = 0
for i in list_num:
sum_of_num += int(a_ )
return sum_of_num
if __name__ == "__main__":
_lowerCAmelCase :Tuple = int(input("""Enter the power of 2: """).strip())
print("""2 ^ """, power, """ = """, 2**power)
_lowerCAmelCase :int = solution(power)
print("""Sum of the digits is: """, result)
| 179 | 0 |
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
_UpperCAmelCase : Optional[Any] = 'Usage of script: script_name <size_of_canvas:int>'
_UpperCAmelCase : List[str] = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def UpperCamelCase ( lowercase_ : List[str] ) -> list[list[bool]]:
'''simple docstring'''
lowercase =[[False for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
return canvas
def UpperCamelCase ( lowercase_ : Dict ) -> None:
'''simple docstring'''
for i, row in enumerate(_lowerCamelCase ):
for j, _ in enumerate(_lowerCamelCase ):
lowercase =bool(random.getrandbits(1 ) )
def UpperCamelCase ( lowercase_ : Optional[int] ) -> list[list[bool]]:
'''simple docstring'''
lowercase =np.array(_lowerCamelCase )
lowercase =np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(_lowerCamelCase ):
for c, pt in enumerate(_lowerCamelCase ):
lowercase =__judge_point(
_lowerCamelCase , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
lowercase =next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
lowercase =current_canvas.tolist()
return return_canvas
def UpperCamelCase ( lowercase_ : Union[str, Any] , lowercase_ : Dict ) -> bool:
'''simple docstring'''
lowercase =0
lowercase =0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
lowercase =pt
if pt:
if alive < 2:
lowercase =False
elif alive == 2 or alive == 3:
lowercase =True
elif alive > 3:
lowercase =False
else:
if alive == 3:
lowercase =True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
_UpperCAmelCase : List[Any] = int(sys.argv[1])
# main working structure of this module.
_UpperCAmelCase : Any = create_canvas(canvas_size)
seed(c)
_UpperCAmelCase : Tuple = plt.subplots()
fig.show()
_UpperCAmelCase : Dict = ListedColormap(['''w''', '''k'''])
try:
while True:
_UpperCAmelCase : List[Any] = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
| 72 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
def __init__( self : Tuple , lowercase__ : TransformeraDModel , lowercase__ : AutoencoderKL , lowercase__ : KarrasDiffusionSchedulers , lowercase__ : Optional[Dict[int, str]] = None , ):
super().__init__()
self.register_modules(transformer=lowercase__ , vae=lowercase__ , scheduler=lowercase__ )
# create a imagenet -> id dictionary for easier use
__lowercase : int = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
__lowercase : List[str] = int(lowercase__ )
__lowercase : str = dict(sorted(self.labels.items() ) )
def snake_case ( self : str , lowercase__ : Union[str, List[str]] ):
if not isinstance(lowercase__ , lowercase__ ):
__lowercase : Any = list(lowercase__ )
for l in label:
if l not in self.labels:
raise ValueError(
f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self : List[str] , lowercase__ : List[int] , lowercase__ : float = 4.0 , lowercase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase__ : int = 5_0 , lowercase__ : Optional[str] = "pil" , lowercase__ : bool = True , ):
__lowercase : Dict = len(lowercase__ )
__lowercase : Tuple = self.transformer.config.sample_size
__lowercase : str = self.transformer.config.in_channels
__lowercase : int = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowercase__ , device=self.device , dtype=self.transformer.dtype , )
__lowercase : Tuple = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__lowercase : str = torch.tensor(lowercase__ , device=self.device ).reshape(-1 )
__lowercase : Optional[int] = torch.tensor([1_0_0_0] * batch_size , device=self.device )
__lowercase : Any = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowercase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__lowercase : List[Any] = latent_model_input[: len(lowercase__ ) // 2]
__lowercase : Optional[Any] = torch.cat([half, half] , dim=0 )
__lowercase : str = self.scheduler.scale_model_input(lowercase__ , lowercase__ )
__lowercase : List[str] = t
if not torch.is_tensor(lowercase__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__lowercase : List[str] = latent_model_input.device.type == "mps"
if isinstance(lowercase__ , lowercase__ ):
__lowercase : Optional[int] = torch.floataa if is_mps else torch.floataa
else:
__lowercase : Dict = torch.intaa if is_mps else torch.intaa
__lowercase : List[Any] = torch.tensor([timesteps] , dtype=lowercase__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__lowercase : str = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__lowercase : List[Any] = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__lowercase : Optional[Any] = self.transformer(
lowercase__ , timestep=lowercase__ , class_labels=lowercase__ ).sample
# perform guidance
if guidance_scale > 1:
__lowercase ,__lowercase : Any = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__lowercase ,__lowercase : Optional[int] = torch.split(lowercase__ , len(lowercase__ ) // 2 , dim=0 )
__lowercase : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__lowercase : str = torch.cat([half_eps, half_eps] , dim=0 )
__lowercase : Optional[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__lowercase ,__lowercase : int = torch.split(lowercase__ , lowercase__ , dim=1 )
else:
__lowercase : Optional[Any] = noise_pred
# compute previous image: x_t -> x_t-1
__lowercase : Optional[Any] = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ ).prev_sample
if guidance_scale > 1:
__lowercase ,__lowercase : Tuple = latent_model_input.chunk(2 , dim=0 )
else:
__lowercase : Tuple = latent_model_input
__lowercase : Any = 1 / self.vae.config.scaling_factor * latents
__lowercase : Optional[int] = self.vae.decode(lowercase__ ).sample
__lowercase : str = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__lowercase : List[str] = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowercase : int = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowercase__ )
| 575 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A : str = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__A : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 720 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase_ ( lowercase__ , lowercase__ , lowercase__):
lowerCamelCase__ = list(range(len(lowercase__)))
lowerCamelCase__ = [v / w for v, w in zip(lowercase__ , lowercase__)]
index.sort(key=lambda lowercase__: ratio[i] , reverse=lowercase__)
lowerCamelCase__ = 0
lowerCamelCase__ = [0] * len(lowercase__)
for i in index:
if weight[i] <= capacity:
lowerCamelCase__ = 1
max_value += value[i]
capacity -= weight[i]
else:
lowerCamelCase__ = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 187 | 0 |
class __SCREAMING_SNAKE_CASE :
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = graph
self._normalize_graph(__lowerCAmelCase , __lowerCAmelCase )
UpperCamelCase__ = len(__lowerCAmelCase )
UpperCamelCase__ = None
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
if sources is int:
UpperCamelCase__ = [sources]
if sinks is int:
UpperCamelCase__ = [sinks]
if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) == 0:
return
UpperCamelCase__ = sources[0]
UpperCamelCase__ = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__lowerCAmelCase ) > 1 or len(__lowerCAmelCase ) > 1:
UpperCamelCase__ = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
UpperCamelCase__ = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
UpperCamelCase__ = max_input_flow
UpperCamelCase__ = 0
UpperCamelCase__ = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
UpperCamelCase__ = max_input_flow
UpperCamelCase__ = size - 1
def _lowerCamelCase ( self ):
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = algorithm(self )
class __SCREAMING_SNAKE_CASE :
def __init__( self , __lowerCAmelCase ):
UpperCamelCase__ = flow_network
UpperCamelCase__ = flow_network.verticesCount
UpperCamelCase__ = flow_network.sourceIndex
UpperCamelCase__ = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
UpperCamelCase__ = flow_network.graph
UpperCamelCase__ = False
def _lowerCamelCase ( self ):
if not self.executed:
self._algorithm()
UpperCamelCase__ = True
def _lowerCamelCase ( self ):
pass
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
def __init__( self , __lowerCAmelCase ):
super().__init__(__lowerCAmelCase )
# use this to save your result
UpperCamelCase__ = -1
def _lowerCamelCase ( self ):
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class __SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
def __init__( self , __lowerCAmelCase ):
super().__init__(__lowerCAmelCase )
UpperCamelCase__ = [[0] * self.verticies_count for i in range(self.verticies_count )]
UpperCamelCase__ = [0] * self.verticies_count
UpperCamelCase__ = [0] * self.verticies_count
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
UpperCamelCase__ = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
UpperCamelCase__ = 0
while i < len(__lowerCAmelCase ):
UpperCamelCase__ = vertices_list[i]
UpperCamelCase__ = self.heights[vertex_index]
self.process_vertex(__lowerCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__lowerCAmelCase ) )
UpperCamelCase__ = 0
else:
i += 1
UpperCamelCase__ = sum(self.preflow[self.source_index] )
def _lowerCamelCase ( self , __lowerCAmelCase ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__lowerCAmelCase , __lowerCAmelCase )
self.relabel(__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
UpperCamelCase__ = self.heights[to_index]
if min_height is not None:
UpperCamelCase__ = min_height + 1
if __name__ == "__main__":
UpperCamelCase__ = [0]
UpperCamelCase__ = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCamelCase__ = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
UpperCamelCase__ = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCamelCase__ = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 619 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 532 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : int = "▁"
__UpperCAmelCase : Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
__UpperCAmelCase : Optional[int] = {
"google/pegasus-xsum": 5_1_2,
}
class _snake_case ( _A ):
_A = VOCAB_FILES_NAMES
_A = PRETRAINED_VOCAB_FILES_MAP
_A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A = PegasusTokenizer
_A = ['input_ids', 'attention_mask']
def __init__( self ,UpperCamelCase=None ,UpperCamelCase=None ,UpperCamelCase="<pad>" ,UpperCamelCase="</s>" ,UpperCamelCase="<unk>" ,UpperCamelCase="<mask_2>" ,UpperCamelCase="<mask_1>" ,UpperCamelCase=None ,UpperCamelCase=103 ,**UpperCamelCase ,) -> Optional[int]:
snake_case__ :List[str] = offset
if additional_special_tokens is not None:
if not isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ):
raise TypeError(
f'additional_special_tokens should be of type {type(UpperCAmelCase__ )}, but is'
f' {type(UpperCAmelCase__ )}' )
snake_case__ :Any = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'<unk_{i}>' for i in range(len(UpperCAmelCase__ ) ,self.offset - 1 )
]
if len(set(UpperCAmelCase__ ) ) != len(UpperCAmelCase__ ):
raise ValueError(
"Please make sure that the provided additional_special_tokens do not contain an incorrectly"
f' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.' )
snake_case__ :Any = additional_special_tokens_extended
else:
snake_case__ :int = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'<unk_{i}>' for i in range(2 ,self.offset )]
super().__init__(
UpperCAmelCase__ ,tokenizer_file=UpperCAmelCase__ ,pad_token=UpperCAmelCase__ ,eos_token=UpperCAmelCase__ ,unk_token=UpperCAmelCase__ ,mask_token=UpperCAmelCase__ ,mask_token_sent=UpperCAmelCase__ ,offset=UpperCAmelCase__ ,additional_special_tokens=UpperCAmelCase__ ,**UpperCAmelCase__ ,)
snake_case__ :Tuple = vocab_file
snake_case__ :Any = False if not self.vocab_file else True
def lowerCAmelCase_ ( self ,UpperCamelCase ) -> Optional[Any]:
snake_case__ :List[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"There should be 3 special tokens: mask_token, pad_token, and eos_token +"
f' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ,UpperCamelCase = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(UpperCAmelCase__ )
elif token_ids_a is None:
return self._special_token_mask(UpperCAmelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self ,UpperCamelCase ,UpperCamelCase = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
snake_case__ :Optional[int] = os.path.join(
UpperCAmelCase__ ,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file ,UpperCAmelCase__ )
return (out_vocab_file,) | 721 |
from __future__ import annotations
import math
def lowercase_ ( __snake_case : int , __snake_case : int , __snake_case : bool , __snake_case : list[int] , __snake_case : float ) -> int:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if len(__snake_case ) == 0:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
return min(
minimax(depth + 1 , node_index * 2 , __snake_case , __snake_case , __snake_case ) , minimax(depth + 1 , node_index * 2 + 1 , __snake_case , __snake_case , __snake_case ) , )
def lowercase_ ( ) -> None:
'''simple docstring'''
snake_case__ :List[Any] = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
snake_case__ :int = math.log(len(__snake_case ) , 2 )
print("Optimal value : " , end="" )
print(minimax(0 , 0 , __snake_case , __snake_case , __snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 57 | 0 |
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_lowerCAmelCase: Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
def _lowercase( __a : Union[List, PIL.Image.Image, torch.Tensor] ):
warnings.warn(
'The preprocess method is deprecated and will be removed in a future version. Please'
' use VaeImageProcessor.preprocess instead' , __a , )
if isinstance(__a , torch.Tensor ):
return image
elif isinstance(__a , PIL.Image.Image ):
a__ =[image]
if isinstance(image[0] , PIL.Image.Image ):
a__ , a__ =image[0].size
a__ , a__ =(x - x % 8 for x in (w, h)) # resize to integer multiple of 8
a__ =[np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
a__ =np.concatenate(__a , axis=0 )
a__ =np.array(__a ).astype(np.floataa ) / 2_55.0
a__ =image.transpose(0 , 3 , 1 , 2 )
a__ =2.0 * image - 1.0
a__ =torch.from_numpy(__a )
elif isinstance(image[0] , torch.Tensor ):
a__ =torch.cat(__a , dim=0 )
return image
def _lowercase( __a : Union[List, PIL.Image.Image, torch.Tensor] ):
if isinstance(__a , torch.Tensor ):
return mask
elif isinstance(__a , PIL.Image.Image ):
a__ =[mask]
if isinstance(mask[0] , PIL.Image.Image ):
a__ , a__ =mask[0].size
a__ , a__ =(x - x % 32 for x in (w, h)) # resize to integer multiple of 32
a__ =[np.array(m.convert('L' ).resize((w, h) , resample=PIL_INTERPOLATION['nearest'] ) )[None, :] for m in mask]
a__ =np.concatenate(__a , axis=0 )
a__ =mask.astype(np.floataa ) / 2_55.0
a__ =0
a__ =1
a__ =torch.from_numpy(__a )
elif isinstance(mask[0] , torch.Tensor ):
a__ =torch.cat(__a , dim=0 )
return mask
class lowercase_ (lowercase__ ):
snake_case =42
snake_case =42
def __init__( self , lowercase_ , lowercase_) -> Tuple:
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_ , lowercase_ = 250 , lowercase_ = 0.0 , lowercase_ = 10 , lowercase_ = 10 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
a__ =image
a__ =_preprocess_image(lowercase_)
a__ =original_image.to(device=self.device , dtype=self.unet.dtype)
a__ =_preprocess_mask(lowercase_)
a__ =mask_image.to(device=self.device , dtype=self.unet.dtype)
a__ =original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(lowercase_ , lowercase_) and len(lowercase_) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowercase_)}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""")
a__ =original_image.shape
a__ =randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=self.unet.dtype)
# set step values
self.scheduler.set_timesteps(lowercase_ , lowercase_ , lowercase_ , self.device)
a__ =eta
a__ =self.scheduler.timesteps[0] + 1
a__ =generator[0] if isinstance(lowercase_ , lowercase_) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
if t < t_last:
# predict the noise residual
a__ =self.unet(lowercase_ , lowercase_).sample
# compute previous image: x_t -> x_t-1
a__ =self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
a__ =self.scheduler.undo_step(lowercase_ , lowercase_ , lowercase_)
a__ =t
a__ =(image / 2 + 0.5).clamp(0 , 1)
a__ =image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
a__ =self.numpy_to_pil(lowercase_)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_)
| 20 | import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = tempfile.mkdtemp()
_A = BlipImageProcessor()
_A = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
_A = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
_A = InstructBlipProcessor(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).tokenizer
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> Optional[Any]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).image_processor
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> str:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ ).qformer_tokenizer
def UpperCAmelCase ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_A = [Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase ( self ) -> List[Any]:
_A = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_A = self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
_A = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
self.assertIsInstance(processor.qformer_tokenizer , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = self.prepare_image_inputs()
_A = image_processor(lowerCAmelCase_ , return_tensors="""np""" )
_A = processor(images=lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase ( self ) -> List[str]:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = processor(text=lowerCAmelCase_ )
_A = tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
_A = qformer_tokenizer(lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def UpperCAmelCase ( self ) -> Any:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = self.prepare_image_inputs()
_A = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def UpperCAmelCase ( self ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(lowerCAmelCase_ )
_A = tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.get_image_processor()
_A = self.get_tokenizer()
_A = self.get_qformer_tokenizer()
_A = InstructBlipProcessor(
tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ , qformer_tokenizer=lowerCAmelCase_ )
_A = """lower newer"""
_A = self.prepare_image_inputs()
_A = processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 401 | 0 |
A : Tuple = 8.314_462 # Unit - J mol-1 K-1
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("Invalid inputs. Enter positive value." )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 718 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = psutil.Process()
__lowerCAmelCase = False
def snake_case ( self ):
__lowerCAmelCase = -1
while True:
__lowerCAmelCase = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def snake_case ( self ):
__lowerCAmelCase = True
__lowerCAmelCase = threading.Thread(target=self.peak_monitor )
__lowerCAmelCase = True
self.thread.start()
def snake_case ( self ):
__lowerCAmelCase = False
self.thread.join()
return self.cpu_memory_peak
A : Any = PeakCPUMemory()
def _lowerCamelCase ( ):
'''simple docstring'''
__lowerCAmelCase = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase = torch.cuda.memory_allocated(_UpperCamelCase )
torch.cuda.reset_peak_memory_stats()
return measures
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
__lowerCAmelCase = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
__lowerCAmelCase = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
__lowerCAmelCase = (torch.cuda.memory_allocated(_UpperCamelCase ) - start_measures[str(_UpperCamelCase )]) / 2**20
__lowerCAmelCase = (torch.cuda.max_memory_allocated(_UpperCamelCase ) - start_measures[str(_UpperCamelCase )]) / 2**20
return measures
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
print(f"{description}:" )
print(f"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(f"- GPU {i} allocated: {measures[str(_UpperCamelCase )]:.2f}MiB" )
__lowerCAmelCase = measures[f"{i}-peak"]
print(f"- GPU {i} peak: {peak:.2f}MiB" )
print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| 282 | 0 |
"""simple docstring"""
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __lowerCAmelCase ( __UpperCamelCase : str = "" ):
'''simple docstring'''
snake_case_ : Optional[int] = url or """https://www.imdb.com/chart/top/?ref_=nv_mv_250"""
snake_case_ : Dict = BeautifulSoup(requests.get(__UpperCamelCase ).text , """html.parser""" )
snake_case_ : str = soup.find_all("""td""" , attrs="""titleColumn""" )
snake_case_ : Union[str, Any] = soup.find_all("""td""" , class_="""ratingColumn imdbRating""" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(__UpperCamelCase , __UpperCamelCase )
}
def __lowerCAmelCase ( __UpperCamelCase : str = "IMDb_Top_250_Movies.csv" ):
'''simple docstring'''
snake_case_ : int = get_imdb_top_aaa_movies()
with open(__UpperCamelCase , """w""" , newline="""""" ) as out_file:
snake_case_ : str = csv.writer(__UpperCamelCase )
writer.writerow(["""Movie title""", """IMDb rating"""] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 58 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowerCAmelCase : Dict = logging.get_logger(__name__)
# General docstring
lowerCAmelCase : str = 'RegNetConfig'
# Base docstring
lowerCAmelCase : str = 'facebook/regnet-y-040'
lowerCAmelCase : Dict = [1, 10_88, 7, 7]
# Image classification docstring
lowerCAmelCase : Dict = 'facebook/regnet-y-040'
lowerCAmelCase : int = 'tabby, tabby cat'
lowerCAmelCase : int = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 3 , A_ = 1 , A_ = 1 , A_ = "relu" , **A_ , )-> str:
'''simple docstring'''
super().__init__(**A_ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
UpperCamelCase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=A_ , strides=A_ , padding='VALID' , groups=A_ , use_bias=A_ , name='convolution' , )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
UpperCamelCase = ACTaFN[activation] if activation is not None else tf.identity
def UpperCAmelCase_ ( self , A_ )-> Any:
'''simple docstring'''
UpperCamelCase = self.convolution(self.padding(A_ ) )
UpperCamelCase = self.normalization(A_ )
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config.num_channels
UpperCamelCase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = shape_list(A_ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
UpperCamelCase = tf.transpose(A_ , perm=(0, 2, 3, 1) )
UpperCamelCase = self.embedder(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ = 2 , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.ConvaD(
filters=A_ , kernel_size=1 , strides=A_ , use_bias=A_ , name='convolution' )
UpperCamelCase = tf.keras.layers.BatchNormalization(epsilon=1e-5 , momentum=0.9 , name='normalization' )
def UpperCAmelCase_ ( self , A_ , A_ = False )-> tf.Tensor:
'''simple docstring'''
return self.normalization(self.convolution(A_ ) , training=A_ )
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , **A_ )-> Optional[Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
UpperCamelCase = [
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=A_ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def UpperCAmelCase_ ( self , A_ )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.pooler(A_ )
for layer_module in self.attention:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = hidden_state * pooled
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.2' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> Tuple:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 1 , **A_ )-> Any:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = in_channels != out_channels or stride != 1
UpperCamelCase = max(1 , out_channels // config.groups_width )
UpperCamelCase = (
TFRegNetShortCut(A_ , stride=A_ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
UpperCamelCase = [
TFRegNetConvLayer(A_ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
A_ , stride=A_ , groups=A_ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(A_ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(A_ , kernel_size=1 , activation=A_ , name='layer.3' ),
]
UpperCamelCase = ACTaFN[config.hidden_act]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
UpperCamelCase = hidden_state
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
UpperCamelCase = self.shortcut(A_ )
hidden_state += residual
UpperCamelCase = self.activation(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , A_ , A_ , A_ = 2 , A_ = 2 , **A_ )-> Dict:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
UpperCamelCase = [
# downsampling is done in the first layer with stride of 2
layer(A_ , A_ , A_ , stride=A_ , name='layers.0' ),
*[layer(A_ , A_ , A_ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def UpperCAmelCase_ ( self , A_ )-> List[Any]:
'''simple docstring'''
for layer_module in self.layers:
UpperCamelCase = layer_module(A_ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
def __init__( self , A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
A_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(A_ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(A_ , A_ , A_ , depth=A_ , name=F'''stages.{i+1}''' ) )
def UpperCAmelCase_ ( self , A_ , A_ = False , A_ = True )-> TFBaseModelOutputWithNoAttention:
'''simple docstring'''
UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
UpperCamelCase = stage_module(A_ )
if output_hidden_states:
UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=A_ , hidden_states=A_ )
@keras_serializable
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer):
lowerCAmelCase_ = RegNetConfig
def __init__( self , A_ , **A_ )-> Union[str, Any]:
'''simple docstring'''
super().__init__(**A_ )
UpperCamelCase = config
UpperCamelCase = TFRegNetEmbeddings(A_ , name='embedder' )
UpperCamelCase = TFRegNetEncoder(A_ , name='encoder' )
UpperCamelCase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=A_ , name='pooler' )
@unpack_inputs
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_ = False , )-> TFBaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.embedder(A_ , training=A_ )
UpperCamelCase = self.encoder(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
# Change to NCHW output format have uniformity in the modules
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
UpperCamelCase = tf.transpose(A_ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
UpperCamelCase = tuple([tf.transpose(A_ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A_ , pooler_output=A_ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = RegNetConfig
lowerCAmelCase_ = """regnet"""
lowerCAmelCase_ = """pixel_values"""
@property
def UpperCAmelCase_ ( self )-> List[str]:
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
lowerCAmelCase : str = r'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCAmelCase : List[str] = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , *A_ , **A_ )-> List[Any]:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=A_ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_=False , )-> Union[TFBaseModelOutputWithPoolingAndNoAttention, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
pixel_values=A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_ , snake_case_):
def __init__( self , A_ , *A_ , **A_ )-> str:
'''simple docstring'''
super().__init__(A_ , *A_ , **A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = TFRegNetMainLayer(A_ , name='regnet' )
# classification head
UpperCamelCase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(A_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=A_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def UpperCAmelCase_ ( self , A_ = None , A_ = None , A_ = None , A_ = None , A_=False , )-> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
'''simple docstring'''
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.regnet(
A_ , output_hidden_states=A_ , return_dict=A_ , training=A_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier[0](A_ )
UpperCamelCase = self.classifier[1](A_ )
UpperCamelCase = None if labels is None else self.hf_compute_loss(labels=A_ , logits=A_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=A_ , logits=A_ , hidden_states=outputs.hidden_states )
| 3 | 0 |
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class lowercase( __a ):
'''simple docstring'''
def __init__( self: int, a_: int, a_: int, a_: Any=1_024, a_: List[Any]=1_024, a_: Optional[Any]=3.6 ):
'''simple docstring'''
_snake_case : Union[str, Any] = tokenizer
_snake_case : int = tokenizer.bos_token_id
_snake_case : Any = dataset
_snake_case : str = seq_length
_snake_case : List[Any] = seq_length * chars_per_token * num_of_sequences
def __iter__( self: Optional[int] ):
'''simple docstring'''
_snake_case : Optional[int] = iter(self.dataset )
_snake_case : Optional[int] = True
while more_examples:
_snake_case : Optional[Any] = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(a_ )["""content"""] )
buffer_len += len(buffer[-1] )
except StopIteration:
_snake_case : Any = False
break
_snake_case : List[str] = tokenizer(a_, truncation=a_ )["""input_ids"""]
_snake_case : Any = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0, len(a_ ), self.seq_length ):
_snake_case : Union[str, Any] = all_token_ids[i : i + self.seq_length]
if len(a_ ) == self.seq_length:
yield torch.tensor(a_ )
def UpperCAmelCase__ (snake_case__ : Tuple ):
"""simple docstring"""
_snake_case : Tuple = {"""streaming""": True}
_snake_case : Dict = load_dataset(args.dataset_name , split="""train""" , **snake_case__ )
_snake_case : Union[str, Any] = ConstantLengthDataset(snake_case__ , snake_case__ , seq_length=args.seq_length )
_snake_case : str = DataLoader(snake_case__ , batch_size=args.batch_size )
return eval_dataloader
def UpperCAmelCase__ (snake_case__ : int ):
"""simple docstring"""
model.eval()
_snake_case : int = []
for step, batch in enumerate(snake_case__ ):
with torch.no_grad():
_snake_case : int = model(snake_case__ , labels=snake_case__ )
_snake_case : List[Any] = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(snake_case__ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
_snake_case : List[str] = torch.mean(torch.cat(snake_case__ ) )
try:
_snake_case : Tuple = torch.exp(snake_case__ )
except OverflowError:
_snake_case : Dict = float("""inf""" )
return loss.item(), perplexity.item()
# Setup Accelerator
A_ = Accelerator()
# Parse configuration
A_ = HfArgumentParser(EvaluationArguments)
A_ = parser.parse_args()
set_seed(args.seed)
# Logging
A_ = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
A_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
A_ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
A_ = create_dataloader(args)
# Prepare everything with our `accelerator`.
A_ , A_ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
A_ , A_ = evaluate(args)
logger.info(F'''loss/eval: {eval_loss}, perplexity: {perplexity}''')
| 701 |
"""simple docstring"""
from typing import Any
def UpperCAmelCase__ (snake_case__ : list ):
"""simple docstring"""
if not input_list:
return []
_snake_case : List[Any] = [input_list.count(snake_case__ ) for value in input_list]
_snake_case : Optional[int] = max(snake_case__ ) # Gets the maximum count in the input list.
# Gets values of modes
return sorted({input_list[i] for i, value in enumerate(snake_case__ ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
"""simple docstring"""
from __future__ import annotations
class lowercase__ :
"""simple docstring"""
def __init__( self , _A=None ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = data
UpperCamelCase : Optional[Any] = None
def __repr__( self ):
'''simple docstring'''
UpperCamelCase : Dict = []
UpperCamelCase : Dict = self
while temp:
string_rep.append(f"""{temp.data}""" )
UpperCamelCase : Union[str, Any] = temp.next
return "->".join(_A )
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
if not elements_list:
raise Exception("""The Elements List is empty""" )
UpperCamelCase : Tuple = Node(elements_list[0] )
for i in range(1 , len(SCREAMING_SNAKE_CASE ) ):
UpperCamelCase : Optional[Any] = Node(elements_list[i] )
UpperCamelCase : Optional[int] = current.next
return head
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
if head_node is not None and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
print_reverse(head_node.next )
print(head_node.data )
def UpperCamelCase ():
from doctest import testmod
testmod()
UpperCamelCase : Dict = make_linked_list([14, 52, 14, 12, 43] )
print("""Linked List:""" )
print(SCREAMING_SNAKE_CASE )
print("""Elements in Reverse:""" )
print_reverse(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 102 | import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class _lowerCAmelCase :
def __init__( self : Any , __snake_case : int ):
lowerCamelCase :Union[str, Any] = data
lowerCamelCase :Optional[int] = [0X67_45_23_01, 0Xef_cd_ab_89, 0X98_ba_dc_fe, 0X10_32_54_76, 0Xc3_d2_e1_f0]
@staticmethod
def snake_case ( __snake_case : List[str] , __snake_case : List[Any] ):
return ((n << b) | (n >> (32 - b))) & 0Xff_ff_ff_ff
def snake_case ( self : Optional[int] ):
lowerCamelCase :Union[str, Any] = B'''\x80''' + B'''\x00''' * (63 - (len(self.data ) + 8) % 64)
lowerCamelCase :List[Any] = self.data + padding + struct.pack('''>Q''' , 8 * len(self.data ) )
return padded_data
def snake_case ( self : Optional[Any] ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def snake_case ( self : Optional[Any] , __snake_case : str ):
lowerCamelCase :Union[str, Any] = list(struct.unpack('''>16L''' , __snake_case ) ) + [0] * 64
for i in range(16 , 80 ):
lowerCamelCase :int = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def snake_case ( self : int ):
lowerCamelCase :Optional[Any] = self.padding()
lowerCamelCase :str = self.split_blocks()
for block in self.blocks:
lowerCamelCase :int = self.expand_block(__snake_case )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :List[Any] = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
lowerCamelCase :int = (b & c) | ((~b) & d)
lowerCamelCase :Any = 0X5a_82_79_99
elif 20 <= i < 40:
lowerCamelCase :Optional[Any] = b ^ c ^ d
lowerCamelCase :Optional[Any] = 0X6e_d9_eb_a1
elif 40 <= i < 60:
lowerCamelCase :List[Any] = (b & c) | (b & d) | (c & d)
lowerCamelCase :List[str] = 0X8f_1b_bc_dc
elif 60 <= i < 80:
lowerCamelCase :Optional[Any] = b ^ c ^ d
lowerCamelCase :Dict = 0Xca_62_c1_d6
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase :Any = (
self.rotate(__snake_case , 5 ) + f + e + k + expanded_block[i] & 0Xff_ff_ff_ff,
a,
self.rotate(__snake_case , 30 ),
c,
d,
)
lowerCamelCase :List[str] = (
self.h[0] + a & 0Xff_ff_ff_ff,
self.h[1] + b & 0Xff_ff_ff_ff,
self.h[2] + c & 0Xff_ff_ff_ff,
self.h[3] + d & 0Xff_ff_ff_ff,
self.h[4] + e & 0Xff_ff_ff_ff,
)
return ("{:08x}" * 5).format(*self.h )
def _lowerCamelCase ( ):
lowerCamelCase :Any = B'''Test String'''
assert SHAaHash(a_).final_hash() == hashlib.shaa(a_).hexdigest() # noqa: S324
def _lowerCamelCase ( ):
lowerCamelCase :str = argparse.ArgumentParser(description='''Process some strings or files''')
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''')
lowerCamelCase :Optional[Any] = parser.parse_args()
lowerCamelCase :Union[str, Any] = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''') as f:
lowerCamelCase :Tuple = f.read()
else:
lowerCamelCase :Optional[Any] = bytes(a_ , '''utf-8''')
print(SHAaHash(a_).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 166 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowercase )
class UpperCAmelCase_ ( __lowercase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowerCamelCase : str = field(default='''text-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowerCamelCase : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
lowerCamelCase : ClassVar[Features] = Features({'''labels''': ClassLabel} )
lowerCamelCase : str = "text"
lowerCamelCase : str = "labels"
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Union[str, Any] ) -> List[str]:
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , UpperCAmelCase__ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
lowerCAmelCase = copy.deepcopy(self )
lowerCAmelCase = self.label_schema.copy()
lowerCAmelCase = features[self.label_column]
lowerCAmelCase = label_schema
return task_template
@property
def __UpperCAmelCase ( self : List[Any] ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
| 513 |
'''simple docstring'''
from __future__ import annotations
def a_ ( lowerCamelCase : list , lowerCamelCase : int ):
# Checks if the entire collection has been sorted
if len(lowerCamelCase ) <= 1 or n <= 1:
return
insert_next(lowerCamelCase , n - 1 )
rec_insertion_sort(lowerCamelCase , n - 1 )
def a_ ( lowerCamelCase : list , lowerCamelCase : int ):
# Checks order between adjacent elements
if index >= len(lowerCamelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
lowerCAmelCase , lowerCAmelCase = (
collection[index],
collection[index - 1],
)
insert_next(lowerCamelCase , index + 1 )
if __name__ == "__main__":
__snake_case =input("""Enter integers separated by spaces: """)
__snake_case =[int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 513 | 1 |
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
a__ = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def A__ (snake_case : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
__UpperCamelCase : Optional[Any] = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
__UpperCamelCase : Union[str, Any] = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
__UpperCamelCase : Union[str, Any] = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"Job {i:>2} is {job[0]} at {job[1]}")
| 279 |
def A__ (snake_case : int ) -> bool:
if number < 0:
raise ValueError("""number must not be negative""" )
return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 279 | 1 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCamelCase__ ( __a ):
'''simple docstring'''
_snake_case = None
_snake_case = None
@property
def snake_case ( self ) -> str:
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def snake_case ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(a_ , 'feature_size' ) )
self.assertTrue(hasattr(a_ , 'sampling_rate' ) )
self.assertTrue(hasattr(a_ , 'padding_value' ) )
def snake_case ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCAmelCase : int = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : List[str] = feat_extract.model_input_names[0]
__lowerCAmelCase : Dict = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(a_ ) == len(a_ ) for x, y in zip(a_ , processed_features[input_name] ) ) )
__lowerCAmelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ )
__lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='np' )
__lowerCAmelCase : Optional[Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def snake_case ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ )
__lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : Optional[int] = feat_extract.model_input_names[0]
__lowerCAmelCase : Dict = BatchFeature({input_name: speech_inputs} , tensor_type='pt' )
__lowerCAmelCase : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def snake_case ( self ) -> int:
'''simple docstring'''
__lowerCAmelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=a_ )
__lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : List[str] = feat_extract.model_input_names[0]
__lowerCAmelCase : List[str] = BatchFeature({input_name: speech_inputs} , tensor_type='tf' )
__lowerCAmelCase : int = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
__lowerCAmelCase : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def snake_case ( self , SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
'''simple docstring'''
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = len(input[0] )
for input_slice in input[1:]:
if len(a_ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if len(a_ ) != len(a_ ):
return False
for input_slice_a, input_slice_a in zip(a_ , a_ ):
if not np.allclose(np.asarray(a_ ) , np.asarray(a_ ) , atol=1e-3 ):
return False
return True
__lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common(numpify=a_ )
__lowerCAmelCase : Dict = feat_extract.model_input_names[0]
__lowerCAmelCase : str = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase : List[str] = self.feat_extract_tester.seq_length_diff
__lowerCAmelCase : List[Any] = self.feat_extract_tester.max_seq_length + pad_diff
__lowerCAmelCase : Optional[int] = self.feat_extract_tester.min_seq_length
__lowerCAmelCase : Any = self.feat_extract_tester.batch_size
__lowerCAmelCase : Any = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
__lowerCAmelCase : Optional[int] = feat_extract.pad(a_ , padding=a_ )
__lowerCAmelCase : Union[str, Any] = input_a[input_name]
__lowerCAmelCase : str = feat_extract.pad(a_ , padding='longest' )
__lowerCAmelCase : List[str] = input_a[input_name]
__lowerCAmelCase : Optional[Any] = feat_extract.pad(a_ , padding='max_length' , max_length=len(speech_inputs[-1] ) )
__lowerCAmelCase : str = input_a[input_name]
__lowerCAmelCase : Any = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )
__lowerCAmelCase : Optional[int] = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(a_ ):
feat_extract.pad(a_ , padding='max_length' )[input_name]
__lowerCAmelCase : str = feat_extract.pad(
a_ , padding='max_length' , max_length=a_ , return_tensors='np' )
__lowerCAmelCase : List[Any] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(a_ ) )
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertTrue(_inputs_are_equal(a_ , a_ ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase : str = feat_extract.pad(a_ , pad_to_multiple_of=10 )
__lowerCAmelCase : Union[str, Any] = input_a[input_name]
__lowerCAmelCase : Any = feat_extract.pad(a_ , padding='longest' , pad_to_multiple_of=10 )
__lowerCAmelCase : List[str] = input_a[input_name]
__lowerCAmelCase : Union[str, Any] = feat_extract.pad(
a_ , padding='max_length' , pad_to_multiple_of=10 , max_length=a_ )
__lowerCAmelCase : int = input_a[input_name]
__lowerCAmelCase : str = feat_extract.pad(
a_ , padding='max_length' , pad_to_multiple_of=10 , max_length=a_ , return_tensors='np' , )
__lowerCAmelCase : Tuple = input_a[input_name]
self.assertTrue(all(len(a_ ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(a_ , a_ ) )
__lowerCAmelCase : Optional[int] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(a_ ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
__lowerCAmelCase : Tuple = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def snake_case ( self , SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
'''simple docstring'''
def _inputs_have_equal_length(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(a_ ) != length:
return False
return True
def _inputs_are_equal(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if len(a_ ) != len(a_ ):
return False
for input_slice_a, input_slice_a in zip(a_ , a_ ):
if not np.allclose(np.asarray(a_ ) , np.asarray(a_ ) , atol=1e-3 ):
return False
return True
__lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(numpify=a_ )
__lowerCAmelCase : List[str] = feat_extract.model_input_names[0]
__lowerCAmelCase : List[str] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
__lowerCAmelCase : Tuple = feat_extract.pad(
a_ , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=a_ )
__lowerCAmelCase : Optional[int] = input_a[input_name]
__lowerCAmelCase : Tuple = feat_extract.pad(a_ , padding='max_length' , max_length=len(speech_inputs[0] ) )
__lowerCAmelCase : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertFalse(_inputs_have_equal_length(a_ ) )
# truncate to smallest with np
__lowerCAmelCase : List[Any] = feat_extract.pad(
a_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=a_ , )
__lowerCAmelCase : List[Any] = input_a[input_name]
__lowerCAmelCase : List[str] = feat_extract.pad(
a_ , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' )
__lowerCAmelCase : Any = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(a_ ) )
# truncate to middle
__lowerCAmelCase : Tuple = feat_extract.pad(
a_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=a_ , return_tensors='np' , )
__lowerCAmelCase : Dict = input_a[input_name]
__lowerCAmelCase : int = feat_extract.pad(
a_ , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=a_ )
__lowerCAmelCase : List[Any] = input_a[input_name]
__lowerCAmelCase : Any = feat_extract.pad(
a_ , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' )
__lowerCAmelCase : List[Any] = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertTrue(_inputs_are_equal(a_ , a_ ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(a_ ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(a_ ):
feat_extract.pad(a_ , truncation=a_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(a_ ):
feat_extract.pad(a_ , padding='longest' , truncation=a_ )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(a_ ):
feat_extract.pad(a_ , padding='longest' , truncation=a_ )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(a_ ):
feat_extract.pad(a_ , padding='max_length' , truncation=a_ )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
__lowerCAmelCase : int = 12
__lowerCAmelCase : Any = feat_extract.pad(
a_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=a_ , truncation=a_ , )
__lowerCAmelCase : Optional[int] = input_a[input_name]
__lowerCAmelCase : Tuple = feat_extract.pad(
a_ , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=a_ , )
__lowerCAmelCase : List[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
__lowerCAmelCase : Tuple = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
__lowerCAmelCase : List[str] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(a_ ) )
self.assertFalse(_inputs_have_equal_length(a_ ) )
def snake_case ( self ) -> Union[str, Any]:
'''simple docstring'''
self._check_padding(numpify=a_ )
def snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
self._check_padding(numpify=a_ )
def snake_case ( self ) -> str:
'''simple docstring'''
self._check_truncation(numpify=a_ )
def snake_case ( self ) -> List[Any]:
'''simple docstring'''
self._check_truncation(numpify=a_ )
@require_torch
def snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase : Any = feat_extract.model_input_names[0]
__lowerCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase : List[Any] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name]
__lowerCAmelCase : Tuple = feat_extract.pad(a_ , padding='longest' , return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def snake_case ( self ) -> int:
'''simple docstring'''
__lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCAmelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase : int = feat_extract.model_input_names[0]
__lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase : Optional[Any] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )[input_name]
__lowerCAmelCase : Tuple = feat_extract.pad(a_ , padding='longest' , return_tensors='tf' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def snake_case ( self ) -> Any:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = self.feat_extract_dict
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = self.feature_extraction_class(**a_ )
__lowerCAmelCase : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase : List[str] = [len(a_ ) for x in speech_inputs]
__lowerCAmelCase : int = feat_extract.model_input_names[0]
__lowerCAmelCase : Any = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase : List[str] = feat_extract.pad(a_ , padding='longest' , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , a_ )
def snake_case ( self ) -> List[str]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = self.feat_extract_dict
__lowerCAmelCase : Dict = True
__lowerCAmelCase : str = self.feature_extraction_class(**a_ )
__lowerCAmelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common()
__lowerCAmelCase : Optional[int] = [len(a_ ) for x in speech_inputs]
__lowerCAmelCase : int = feat_extract.model_input_names[0]
__lowerCAmelCase : Any = BatchFeature({input_name: speech_inputs} )
__lowerCAmelCase : List[Any] = min(a_ )
__lowerCAmelCase : Optional[Any] = feat_extract.pad(
a_ , padding='max_length' , max_length=a_ , truncation=a_ , return_tensors='np' )
self.assertIn('attention_mask' , a_ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 706 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
A_ = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , SCREAMING_SNAKE_CASE = 14 ) -> None:
if group not in primes:
raise ValueError('Unsupported Group' )
__lowerCAmelCase : List[str] = primes[group]['prime']
__lowerCAmelCase : Tuple = primes[group]['generator']
__lowerCAmelCase : Optional[int] = int(hexlify(urandom(32 ) ) , base=16 )
def snake_case ( self ) -> str:
return hex(self.__private_key )[2:]
def snake_case ( self ) -> str:
__lowerCAmelCase : Dict = pow(self.generator , self.__private_key , self.prime )
return hex(SCREAMING_SNAKE_CASE )[2:]
def snake_case ( self , SCREAMING_SNAKE_CASE ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(SCREAMING_SNAKE_CASE , (self.prime - 1) // 2 , self.prime ) == 1
)
def snake_case ( self , SCREAMING_SNAKE_CASE ) -> str:
__lowerCAmelCase : List[Any] = int(SCREAMING_SNAKE_CASE , base=16 )
if not self.is_valid_public_key(SCREAMING_SNAKE_CASE ):
raise ValueError('Invalid public key' )
__lowerCAmelCase : Tuple = pow(SCREAMING_SNAKE_CASE , self.__private_key , self.prime )
return shaaaa(str(SCREAMING_SNAKE_CASE ).encode() ).hexdigest()
@staticmethod
def snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> bool:
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(SCREAMING_SNAKE_CASE , (prime - 1) // 2 , SCREAMING_SNAKE_CASE ) == 1
)
@staticmethod
def snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 14 ) -> str:
__lowerCAmelCase : Tuple = int(SCREAMING_SNAKE_CASE , base=16 )
__lowerCAmelCase : Any = int(SCREAMING_SNAKE_CASE , base=16 )
__lowerCAmelCase : Optional[int] = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError('Invalid public key' )
__lowerCAmelCase : List[Any] = pow(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return shaaaa(str(SCREAMING_SNAKE_CASE ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase__ : str = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
UpperCamelCase__ : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : List[str] = -1
UpperCamelCase__ : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Dict = model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Tuple = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
UpperCamelCase__ : List[str] = TextStreamer(__SCREAMING_SNAKE_CASE)
model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ : List[str] = cs.out[:-1]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def lowerCAmelCase__ ( self) -> Tuple:
UpperCamelCase__ : Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
UpperCamelCase__ : Any = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Tuple = -1
UpperCamelCase__ : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Dict = model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any = tokenizer.decode(greedy_ids[0])
UpperCamelCase__ : Tuple = TextIteratorStreamer(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCamelCase__ : Tuple = Thread(target=model.generate , kwargs=__SCREAMING_SNAKE_CASE)
thread.start()
UpperCamelCase__ : Tuple = """"""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def lowerCAmelCase__ ( self) -> List[str]:
UpperCamelCase__ : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
UpperCamelCase__ : List[str] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : List[Any] = -1
UpperCamelCase__ : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Union[str, Any] = model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : str = greedy_ids[:, input_ids.shape[1] :]
UpperCamelCase__ : Tuple = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
UpperCamelCase__ : Any = TextStreamer(__SCREAMING_SNAKE_CASE , skip_prompt=__SCREAMING_SNAKE_CASE)
model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=10 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ : Any = cs.out[:-1]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def lowerCAmelCase__ ( self) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
UpperCamelCase__ : str = AutoTokenizer.from_pretrained('distilgpt2')
UpperCamelCase__ : Union[str, Any] = AutoModelForCausalLM.from_pretrained('distilgpt2').to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Any = -1
UpperCamelCase__ : Union[str, Any] = torch.ones((1, 5) , device=__SCREAMING_SNAKE_CASE).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCamelCase__ : Optional[int] = TextStreamer(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE)
model.generate(__SCREAMING_SNAKE_CASE , max_new_tokens=1 , do_sample=__SCREAMING_SNAKE_CASE , streamer=__SCREAMING_SNAKE_CASE)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCamelCase__ : Tuple = cs.out[:-1] # Remove the final "\n"
UpperCamelCase__ : List[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='pt')
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def lowerCAmelCase__ ( self) -> Optional[int]:
UpperCamelCase__ : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
UpperCamelCase__ : Dict = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2').to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : Optional[Any] = -1
UpperCamelCase__ : int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(__SCREAMING_SNAKE_CASE)
UpperCamelCase__ : str = TextIteratorStreamer(__SCREAMING_SNAKE_CASE , timeout=0.001)
UpperCamelCase__ : Dict = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer}
UpperCamelCase__ : List[str] = Thread(target=model.generate , kwargs=__SCREAMING_SNAKE_CASE)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(__SCREAMING_SNAKE_CASE):
UpperCamelCase__ : Optional[Any] = """"""
for new_text in streamer:
streamer_text += new_text
| 410 |
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A_ : Optional[int] = logging.get_logger(__name__)
A_ : Tuple = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''segformer'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=[2, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[8, 4, 2, 1] , __SCREAMING_SNAKE_CASE=[3_2, 6_4, 1_6_0, 2_5_6] , __SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , __SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , __SCREAMING_SNAKE_CASE=[1, 2, 5, 8] , __SCREAMING_SNAKE_CASE=[4, 4, 4, 4] , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1e-6 , __SCREAMING_SNAKE_CASE=2_5_6 , __SCREAMING_SNAKE_CASE=2_5_5 , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , __SCREAMING_SNAKE_CASE , )
snake_case__ : Dict = num_channels
snake_case__ : Optional[Any] = num_encoder_blocks
snake_case__ : Any = depths
snake_case__ : Optional[int] = sr_ratios
snake_case__ : Tuple = hidden_sizes
snake_case__ : List[str] = patch_sizes
snake_case__ : str = strides
snake_case__ : Optional[int] = mlp_ratios
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : List[Any] = classifier_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : List[str] = drop_path_rate
snake_case__ : int = layer_norm_eps
snake_case__ : List[Any] = decoder_hidden_size
snake_case__ : List[Any] = kwargs.get("""reshape_last_stage""" , __SCREAMING_SNAKE_CASE )
snake_case__ : Dict = semantic_loss_ignore_index
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = version.parse('''1.11''' )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1e-4
@property
def __UpperCamelCase ( self ):
return 1_2
| 38 | 0 |
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : int = tau * frequency / samplerate
snake_case_ : Any = sin(__UpperCamelCase )
snake_case_ : str = cos(__UpperCamelCase )
snake_case_ : List[Any] = _sin / (2 * q_factor)
snake_case_ : List[str] = (1 - _cos) / 2
snake_case_ : str = 1 - _cos
snake_case_ : Union[str, Any] = 1 + alpha
snake_case_ : Tuple = -2 * _cos
snake_case_ : int = 1 - alpha
snake_case_ : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : Any = tau * frequency / samplerate
snake_case_ : Union[str, Any] = sin(__UpperCamelCase )
snake_case_ : Tuple = cos(__UpperCamelCase )
snake_case_ : int = _sin / (2 * q_factor)
snake_case_ : str = (1 + _cos) / 2
snake_case_ : Union[str, Any] = -1 - _cos
snake_case_ : List[Any] = 1 + alpha
snake_case_ : str = -2 * _cos
snake_case_ : Optional[int] = 1 - alpha
snake_case_ : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : str = tau * frequency / samplerate
snake_case_ : Union[str, Any] = sin(__UpperCamelCase )
snake_case_ : Any = cos(__UpperCamelCase )
snake_case_ : Optional[Any] = _sin / (2 * q_factor)
snake_case_ : str = _sin / 2
snake_case_ : List[Any] = 0
snake_case_ : Any = -ba
snake_case_ : Optional[int] = 1 + alpha
snake_case_ : Union[str, Any] = -2 * _cos
snake_case_ : Any = 1 - alpha
snake_case_ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float = 1 / sqrt(2 ) ):
'''simple docstring'''
snake_case_ : int = tau * frequency / samplerate
snake_case_ : str = sin(__UpperCamelCase )
snake_case_ : Optional[int] = cos(__UpperCamelCase )
snake_case_ : Any = _sin / (2 * q_factor)
snake_case_ : List[str] = 1 - alpha
snake_case_ : Optional[Any] = -2 * _cos
snake_case_ : List[Any] = 1 + alpha
snake_case_ : int = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : Union[str, Any] = tau * frequency / samplerate
snake_case_ : Optional[Any] = sin(__UpperCamelCase )
snake_case_ : Tuple = cos(__UpperCamelCase )
snake_case_ : Any = _sin / (2 * q_factor)
snake_case_ : Any = 1_0 ** (gain_db / 4_0)
snake_case_ : Optional[Any] = 1 + alpha * big_a
snake_case_ : Any = -2 * _cos
snake_case_ : Any = 1 - alpha * big_a
snake_case_ : Union[str, Any] = 1 + alpha / big_a
snake_case_ : List[str] = -2 * _cos
snake_case_ : int = 1 - alpha / big_a
snake_case_ : str = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : List[Any] = tau * frequency / samplerate
snake_case_ : List[str] = sin(__UpperCamelCase )
snake_case_ : Optional[int] = cos(__UpperCamelCase )
snake_case_ : int = _sin / (2 * q_factor)
snake_case_ : int = 1_0 ** (gain_db / 4_0)
snake_case_ : Tuple = (big_a + 1) - (big_a - 1) * _cos
snake_case_ : int = (big_a + 1) + (big_a - 1) * _cos
snake_case_ : Any = (big_a - 1) - (big_a + 1) * _cos
snake_case_ : Union[str, Any] = (big_a - 1) + (big_a + 1) * _cos
snake_case_ : List[str] = 2 * sqrt(__UpperCamelCase ) * alpha
snake_case_ : Optional[int] = big_a * (pmc + aaa)
snake_case_ : Optional[Any] = 2 * big_a * mpc
snake_case_ : Union[str, Any] = big_a * (pmc - aaa)
snake_case_ : Dict = ppmc + aaa
snake_case_ : str = -2 * pmpc
snake_case_ : str = ppmc - aaa
snake_case_ : List[str] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : float , __UpperCamelCase : float = 1 / sqrt(2 ) , ):
'''simple docstring'''
snake_case_ : Dict = tau * frequency / samplerate
snake_case_ : int = sin(__UpperCamelCase )
snake_case_ : List[Any] = cos(__UpperCamelCase )
snake_case_ : int = _sin / (2 * q_factor)
snake_case_ : Dict = 1_0 ** (gain_db / 4_0)
snake_case_ : Dict = (big_a + 1) - (big_a - 1) * _cos
snake_case_ : Any = (big_a + 1) + (big_a - 1) * _cos
snake_case_ : Any = (big_a - 1) - (big_a + 1) * _cos
snake_case_ : List[str] = (big_a - 1) + (big_a + 1) * _cos
snake_case_ : int = 2 * sqrt(__UpperCamelCase ) * alpha
snake_case_ : Optional[Any] = big_a * (ppmc + aaa)
snake_case_ : List[str] = -2 * big_a * pmpc
snake_case_ : Any = big_a * (ppmc - aaa)
snake_case_ : Optional[Any] = pmc + aaa
snake_case_ : Optional[Any] = 2 * mpc
snake_case_ : str = pmc - aaa
snake_case_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 712 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
__lowerCAmelCase : Optional[Any] = parser.parse_args()
__lowerCAmelCase : Dict = '''cpu'''
__lowerCAmelCase : Optional[Any] = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
__lowerCAmelCase : Tuple = '''path-to-your-trained-model'''
__lowerCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__lowerCAmelCase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__lowerCAmelCase : List[Any] = pipe.to(device)
# to channels last
__lowerCAmelCase : Optional[Any] = pipe.unet.to(memory_format=torch.channels_last)
__lowerCAmelCase : List[str] = pipe.vae.to(memory_format=torch.channels_last)
__lowerCAmelCase : Optional[Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__lowerCAmelCase : Dict = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__lowerCAmelCase : Tuple = torch.randn(2, 4, 64, 64)
__lowerCAmelCase : Any = torch.rand(1) * 999
__lowerCAmelCase : List[str] = torch.randn(2, 77, 768)
__lowerCAmelCase : Optional[int] = (sample, timestep, encoder_hidden_status)
try:
__lowerCAmelCase : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__lowerCAmelCase : Optional[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : Any = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__lowerCAmelCase : int = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__lowerCAmelCase : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__lowerCAmelCase : List[str] = 666
__lowerCAmelCase : Optional[int] = torch.Generator(device).manual_seed(seed)
__lowerCAmelCase : List[Any] = {'''generator''': generator}
if args.steps is not None:
__lowerCAmelCase : Any = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__lowerCAmelCase : str = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
class _snake_case :
def __init__( self : List[Any] , UpperCAmelCase : list[str] ):
__lowerCamelCase : list[dict] = []
self.adlist.append(
{"value": "", "next_states": [], "fail_state": 0, "output": []} )
for keyword in keywords:
self.add_keyword(_A )
self.set_fail_transitions()
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : str ):
for state in self.adlist[current_state]["next_states"]:
if char == self.adlist[state]["value"]:
return state
return None
def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : str ):
__lowerCamelCase : List[Any] = 0
for character in keyword:
__lowerCamelCase : Tuple = self.find_next_state(_A , _A )
if next_state is None:
self.adlist.append(
{
"value": character,
"next_states": [],
"fail_state": 0,
"output": [],
} )
self.adlist[current_state]["next_states"].append(len(self.adlist ) - 1 )
__lowerCamelCase : int = len(self.adlist ) - 1
else:
__lowerCamelCase : str = next_state
self.adlist[current_state]["output"].append(_A )
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : deque = deque()
for node in self.adlist[0]["next_states"]:
q.append(_A )
__lowerCamelCase : int = 0
while q:
__lowerCamelCase : Tuple = q.popleft()
for child in self.adlist[r]["next_states"]:
q.append(_A )
__lowerCamelCase : Dict = self.adlist[r]['''fail_state''']
while (
self.find_next_state(_A , self.adlist[child]["value"] ) is None
and state != 0
):
__lowerCamelCase : Union[str, Any] = self.adlist[state]['''fail_state''']
__lowerCamelCase : Tuple = self.find_next_state(
_A , self.adlist[child]["value"] )
if self.adlist[child]["fail_state"] is None:
__lowerCamelCase : Optional[Any] = 0
__lowerCamelCase : int = (
self.adlist[child]['''output''']
+ self.adlist[self.adlist[child]['''fail_state''']]['''output''']
)
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : str ):
__lowerCamelCase : dict = {} # returns a dict with keywords and list of its occurrences
__lowerCamelCase : Optional[Any] = 0
for i in range(len(_A ) ):
while (
self.find_next_state(_A , string[i] ) is None
and current_state != 0
):
__lowerCamelCase : int = self.adlist[current_state]['''fail_state''']
__lowerCamelCase : Dict = self.find_next_state(_A , string[i] )
if next_state is None:
__lowerCamelCase : Union[str, Any] = 0
else:
__lowerCamelCase : Any = next_state
for key in self.adlist[current_state]["output"]:
if key not in result:
__lowerCamelCase : str = []
result[key].append(i - len(_A ) + 1 )
return result
if __name__ == "__main__":
import doctest
doctest.testmod() | 646 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = BartTokenizer
def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase__ : Any = add_prefix_space
UpperCAmelCase__ : str = pre_tok_class(**_A )
UpperCAmelCase__ : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase__ : Optional[Any] = '''post_processor'''
UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] )
UpperCAmelCase__ : Dict = False
if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : Dict = True
if state.get('''trim_offsets''' , _A ) != trim_offsets:
UpperCAmelCase__ : List[Any] = trim_offsets
UpperCAmelCase__ : List[Any] = True
if changes_to_apply:
UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) )
UpperCAmelCase__ : Union[str, Any] = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self : Dict , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
UpperCAmelCase__ : str = value
def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 75 | 0 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
class UpperCamelCase_ (__A ):
__magic_name__ = CLIPConfig
__magic_name__ = ['''CLIPEncoderLayer''']
def __init__( self : Optional[int] , lowerCAmelCase_ : CLIPConfig ) -> str:
super().__init__(lowerCAmelCase_ )
UpperCAmelCase_ : str = CLIPVisionModelWithProjection(config.vision_config )
UpperCAmelCase_ : Any = nn.Linear(config.vision_config.projection_dim , 1 )
UpperCAmelCase_ : List[Any] = nn.Linear(config.vision_config.projection_dim , 1 )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[int]=0.5 , lowerCAmelCase_ : List[Any]=0.5 ) -> Any:
UpperCAmelCase_ : Union[str, Any] = self.vision_model(lowerCAmelCase_ )[0]
UpperCAmelCase_ : Optional[Any] = self.p_head(lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = nsfw_detected.flatten()
UpperCAmelCase_ : List[str] = nsfw_detected > p_threshold
UpperCAmelCase_ : str = nsfw_detected.tolist()
if any(lowerCAmelCase_ ):
logger.warning(
"Potential NSFW content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, nsfw_detected_ in enumerate(lowerCAmelCase_ ):
if nsfw_detected_:
UpperCAmelCase_ : int = np.zeros(images[idx].shape )
UpperCAmelCase_ : Union[str, Any] = self.w_head(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = watermark_detected.flatten()
UpperCAmelCase_ : str = watermark_detected > w_threshold
UpperCAmelCase_ : Any = watermark_detected.tolist()
if any(lowerCAmelCase_ ):
logger.warning(
"Potential watermarked content was detected in one or more images. A black image will be returned instead."
" Try again with a different prompt and/or seed." )
for idx, watermark_detected_ in enumerate(lowerCAmelCase_ ):
if watermark_detected_:
UpperCAmelCase_ : Optional[int] = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected
| 714 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCamelCase_ = pytest.mark.integration
@pytest.mark.parametrize("path" ,["paws", "csv"] )
def snake_case ( A__ ,A__ ):
inspect_dataset(A__ ,A__ )
UpperCAmelCase_ : Tuple = path + ".py"
assert script_name in os.listdir(A__ )
assert "__pycache__" not in os.listdir(A__ )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" ,["accuracy"] )
def snake_case ( A__ ,A__ ):
inspect_metric(A__ ,A__ )
UpperCAmelCase_ : List[str] = path + ".py"
assert script_name in os.listdir(A__ )
assert "__pycache__" not in os.listdir(A__ )
@pytest.mark.parametrize(
"path, config_name, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Dict = get_dataset_config_info(A__ ,config_name=A__ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def snake_case ( A__ ,A__ ,A__ ):
with pytest.raises(A__ ):
get_dataset_config_info(A__ ,config_name=A__ )
@pytest.mark.parametrize(
"path, expected" ,[
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] ,)
def snake_case ( A__ ,A__ ):
UpperCAmelCase_ : str = get_dataset_config_names(A__ )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" ,[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : List[str] = get_dataset_infos(A__ )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ : List[str] = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ : int = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Optional[Any] = get_dataset_infos(A__ )
assert expected_config in infos
UpperCAmelCase_ : Union[str, Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def snake_case ( A__ ,A__ ,A__ ):
with pytest.raises(A__ ):
get_dataset_split_names(A__ ,config_name=A__ )
| 463 | 0 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self , lowercase , lowercase) -> List[str]:
'''simple docstring'''
return F'gaussian_noise_s={seed}_shape={"_".join([str(lowercase) for s in shape])}.npy'
def __lowercase ( self) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
def __lowercase ( self , lowercase=0 , lowercase=(4, 4, 64, 64) , lowercase=False) -> List[str]:
'''simple docstring'''
a__ : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
a__ : Tuple = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase)) , dtype=lowercase)
return image
def __lowercase ( self , lowercase=False , lowercase="CompVis/stable-diffusion-v1-4") -> int:
'''simple docstring'''
a__ : int = jnp.bfloataa if fpaa else jnp.floataa
a__ : str = 'bf16' if fpaa else None
a__ , a__ : Optional[Any] = FlaxUNetaDConditionModel.from_pretrained(
lowercase , subfolder='unet' , dtype=lowercase , revision=lowercase)
return model, params
def __lowercase ( self , lowercase=0 , lowercase=(4, 77, 768) , lowercase=False) -> str:
'''simple docstring'''
a__ : List[Any] = jnp.bfloataa if fpaa else jnp.floataa
a__ : str = jnp.array(load_hf_numpy(self.get_file_format(lowercase , lowercase)) , dtype=lowercase)
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]],
[17, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]],
[8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]],
[3, 1000, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]],
# fmt: on
])
def __lowercase ( self , lowercase , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__ , a__ : List[str] = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4' , fpaa=lowercase)
a__ : Optional[Any] = self.get_latents(lowercase , fpaa=lowercase)
a__ : str = self.get_encoder_hidden_states(lowercase , fpaa=lowercase)
a__ : Dict = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
a__ : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
a__ : str = jnp.array(lowercase , dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]],
[17, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]],
[8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]],
[3, 1000, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]],
# fmt: on
])
def __lowercase ( self , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__ , a__ : str = self.get_unet_model(model_id='stabilityai/stable-diffusion-2' , fpaa=lowercase)
a__ : Optional[int] = self.get_latents(lowercase , shape=(4, 4, 96, 96) , fpaa=lowercase)
a__ : Optional[int] = self.get_encoder_hidden_states(lowercase , shape=(4, 77, 1024) , fpaa=lowercase)
a__ : List[str] = model.apply(
{'params': params} , lowercase , jnp.array(lowercase , dtype=jnp.intaa) , encoder_hidden_states=lowercase , ).sample
assert sample.shape == latents.shape
a__ : Union[str, Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())) , dtype=jnp.floataa)
a__ : Any = jnp.array(lowercase , dtype=jnp.floataa)
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowercase , lowercase , atol=1e-2)
| 302 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowercase : Any = logging.get_logger(__name__)
lowercase : Dict = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowercase : List[Any] = {
"""vocab_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"""
),
},
"""merges_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""",
"""roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""",
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"""
),
},
"""tokenizer_file""": {
"""roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""",
"""roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""",
"""roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""",
"""distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""",
"""roberta-base-openai-detector""": (
"""https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"""
),
"""roberta-large-openai-detector""": (
"""https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"""
),
},
}
lowercase : Union[str, Any] = {
"""roberta-base""": 5_1_2,
"""roberta-large""": 5_1_2,
"""roberta-large-mnli""": 5_1_2,
"""distilroberta-base""": 5_1_2,
"""roberta-base-openai-detector""": 5_1_2,
"""roberta-large-openai-detector""": 5_1_2,
}
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : List[Any] = VOCAB_FILES_NAMES
__A : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__A : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : List[str] = ['''input_ids''', '''attention_mask''']
__A : Tuple = RobertaTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="replace" , lowercase="<s>" , lowercase="</s>" , lowercase="</s>" , lowercase="<s>" , lowercase="<unk>" , lowercase="<pad>" , lowercase="<mask>" , lowercase=False , lowercase=True , **lowercase , ) -> int:
'''simple docstring'''
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , errors=lowercase , bos_token=lowercase , eos_token=lowercase , sep_token=lowercase , cls_token=lowercase , unk_token=lowercase , pad_token=lowercase , mask_token=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase , **lowercase , )
a__ : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('add_prefix_space' , lowercase) != add_prefix_space:
a__ : Dict = getattr(lowercase , pre_tok_state.pop('type'))
a__ : Optional[int] = add_prefix_space
a__ : Optional[int] = pre_tok_class(**lowercase)
a__ : List[Any] = add_prefix_space
a__ : Dict = 'post_processor'
a__ : Union[str, Any] = getattr(self.backend_tokenizer , lowercase , lowercase)
if tokenizer_component_instance:
a__ : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
a__ : List[str] = tuple(state['sep'])
if "cls" in state:
a__ : Any = tuple(state['cls'])
a__ : Union[str, Any] = False
if state.get('add_prefix_space' , lowercase) != add_prefix_space:
a__ : int = add_prefix_space
a__ : Dict = True
if state.get('trim_offsets' , lowercase) != trim_offsets:
a__ : List[str] = trim_offsets
a__ : List[str] = True
if changes_to_apply:
a__ : Any = getattr(lowercase , state.pop('type'))
a__ : str = component_class(**lowercase)
setattr(self.backend_tokenizer , lowercase , lowercase)
@property
def __lowercase ( self) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.')
return None
return str(self._mask_token)
@mask_token.setter
def __lowercase ( self , lowercase) -> Dict:
'''simple docstring'''
a__ : Tuple = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase) if isinstance(lowercase , lowercase) else value
a__ : List[str] = value
def __lowercase ( self , *lowercase , **lowercase) -> BatchEncoding:
'''simple docstring'''
a__ : Any = kwargs.get('is_split_into_words' , lowercase)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase , **lowercase)
def __lowercase ( self , *lowercase , **lowercase) -> BatchEncoding:
'''simple docstring'''
a__ : Dict = kwargs.get('is_split_into_words' , lowercase)
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase , **lowercase)
def __lowercase ( self , lowercase , lowercase = None) -> Tuple[str]:
'''simple docstring'''
a__ : int = self._tokenizer.model.save(lowercase , name=lowercase)
return tuple(lowercase)
def __lowercase ( self , lowercase , lowercase=None) -> Optional[Any]:
'''simple docstring'''
a__ : int = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowercase ( self , lowercase , lowercase = None) -> List[int]:
'''simple docstring'''
a__ : Union[str, Any] = [self.sep_token_id]
a__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
| 302 | 1 |
'''simple docstring'''
import numpy as np
__UpperCamelCase : Optional[Any] = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self ) -> Optional[int]:
_A : str = np.array(UpperCAmelCase__ )
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> List[str]:
_A , _A : Any = np.where(letter == self.SQUARE )
_A : List[str] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ ) -> Dict:
_A : int = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> Dict:
_A : Any = message.lower()
_A : str = message.replace(''' ''' , '''''' )
_A : Optional[int] = message.replace('''j''' , '''i''' )
_A : List[str] = np.empty((2, len(UpperCAmelCase__ )) )
for letter_index in range(len(UpperCAmelCase__ ) ):
_A : str = self.letter_to_numbers(message[letter_index] )
_A : List[Any] = numbers[0]
_A : Any = numbers[1]
_A : int = first_step.reshape(2 * len(UpperCAmelCase__ ) )
_A : Union[str, Any] = ''''''
for numbers_index in range(len(UpperCAmelCase__ ) ):
_A : List[Any] = int(second_step[numbers_index * 2] )
_A : Tuple = int(second_step[(numbers_index * 2) + 1] )
_A : Union[str, Any] = self.numbers_to_letter(UpperCAmelCase__ , UpperCAmelCase__ )
_A : Optional[Any] = encoded_message + letter
return encoded_message
def _lowerCamelCase ( self , UpperCAmelCase__ ) -> List[Any]:
_A : Optional[int] = message.lower()
message.replace(''' ''' , '''''' )
_A : Optional[Any] = np.empty(2 * len(UpperCAmelCase__ ) )
for letter_index in range(len(UpperCAmelCase__ ) ):
_A : Any = self.letter_to_numbers(message[letter_index] )
_A : Dict = numbers[0]
_A : Union[str, Any] = numbers[1]
_A : Dict = first_step.reshape((2, len(UpperCAmelCase__ )) )
_A : List[str] = ''''''
for numbers_index in range(len(UpperCAmelCase__ ) ):
_A : Any = int(second_step[0, numbers_index] )
_A : Dict = int(second_step[1, numbers_index] )
_A : Optional[int] = self.numbers_to_letter(UpperCAmelCase__ , UpperCAmelCase__ )
_A : List[Any] = decoded_message + letter
return decoded_message
| 720 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCamelCase : Union[str, Any] = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def lowercase ( lowerCAmelCase : Dict , lowerCAmelCase : List[Any] , lowerCAmelCase : Dict=None , lowerCAmelCase : str=None , lowerCAmelCase : Optional[int]=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[str]=None , ):
"""simple docstring"""
if attention_mask is None:
_A : Union[str, Any] = np.where(input_ids != config.pad_token_id , 1 , 0)
if decoder_attention_mask is None:
_A : List[str] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0)
if head_mask is None:
_A : Tuple = np.ones((config.encoder_layers, config.encoder_attention_heads))
if decoder_head_mask is None:
_A : Tuple = np.ones((config.decoder_layers, config.decoder_attention_heads))
if cross_attn_head_mask is None:
_A : Dict = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=1_3 , UpperCAmelCase__=7 , UpperCAmelCase__=True , UpperCAmelCase__=False , UpperCAmelCase__=9_9 , UpperCAmelCase__=1_6 , UpperCAmelCase__=2 , UpperCAmelCase__=4 , UpperCAmelCase__=4 , UpperCAmelCase__="gelu" , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.1 , UpperCAmelCase__=3_2 , UpperCAmelCase__=2 , UpperCAmelCase__=1 , UpperCAmelCase__=0 , UpperCAmelCase__=0.0_2 , ) -> Tuple:
_A : List[Any] = parent
_A : Optional[Any] = batch_size
_A : int = seq_length
_A : Optional[int] = is_training
_A : List[Any] = use_labels
_A : Optional[Any] = vocab_size
_A : Tuple = hidden_size
_A : str = num_hidden_layers
_A : Tuple = num_attention_heads
_A : Optional[Any] = intermediate_size
_A : Tuple = hidden_act
_A : int = hidden_dropout_prob
_A : Optional[Any] = attention_probs_dropout_prob
_A : Tuple = max_position_embeddings
_A : Optional[int] = eos_token_id
_A : Optional[Any] = pad_token_id
_A : str = bos_token_id
_A : Optional[Any] = initializer_range
def _lowerCamelCase ( self ) -> Tuple:
_A : List[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_A : Any = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_A : Optional[Any] = shift_tokens_right(UpperCAmelCase__ , 1 , 2 )
_A : Any = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase__ , )
_A : Dict = prepare_blenderbot_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, inputs_dict
def _lowerCamelCase ( self ) -> Optional[Any]:
_A , _A : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Tuple:
_A : Tuple = 2_0
_A : Tuple = model_class_name(UpperCAmelCase__ )
_A : Any = model.encode(inputs_dict['''input_ids'''] )
_A , _A : str = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_A : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
_A : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_A : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_A : Any = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
_A : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_A : List[Any] = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase__ , )
_A : List[str] = model.decode(UpperCAmelCase__ , UpperCAmelCase__ )
_A : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
def _lowerCamelCase ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> Optional[Any]:
_A : Optional[Any] = 2_0
_A : Optional[int] = model_class_name(UpperCAmelCase__ )
_A : Any = model.encode(inputs_dict['''input_ids'''] )
_A , _A : Tuple = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_A : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_A : Any = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase__ , UpperCAmelCase__ )
_A : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_A : int = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
_A : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_A : str = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase__ , decoder_position_ids=UpperCAmelCase__ , )
_A : int = model.decode(UpperCAmelCase__ , UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ )
_A : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__magic_name__ = 9_9
def _lowerCamelCase ( self ) -> List[str]:
_A : str = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
_A : Optional[int] = input_ids.shape[0]
_A : Optional[int] = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _lowerCamelCase ( self ) -> Any:
_A , _A , _A : Dict = self._get_config_and_data()
_A : Dict = FlaxBlenderbotSmallForConditionalGeneration(UpperCAmelCase__ )
_A : int = lm_model(input_ids=UpperCAmelCase__ )
_A : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> str:
_A : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
_A : Union[str, Any] = FlaxBlenderbotSmallForConditionalGeneration(UpperCAmelCase__ )
_A : List[str] = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
_A : Any = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
_A : Tuple = lm_model(input_ids=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ )
_A : List[Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[int]:
_A : List[str] = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
_A : str = shift_tokens_right(UpperCAmelCase__ , 1 , 2 )
_A : Optional[int] = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
_A : Optional[int] = np.equal(UpperCAmelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(UpperCAmelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase__ ( snake_case_ , unittest.TestCase , snake_case_ ):
"""simple docstring"""
__magic_name__ = True
__magic_name__ = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
__magic_name__ = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def _lowerCamelCase ( self ) -> int:
_A : Optional[int] = FlaxBlenderbotSmallModelTester(self )
def _lowerCamelCase ( self ) -> Union[str, Any]:
_A , _A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Any:
_A , _A : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
def _lowerCamelCase ( self ) -> Optional[int]:
_A , _A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A : List[str] = self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ )
_A : Any = model_class(UpperCAmelCase__ )
@jax.jit
def encode_jitted(UpperCAmelCase__ , UpperCAmelCase__=None , **UpperCAmelCase__ ):
return model.encode(input_ids=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
_A : Optional[int] = encode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_A : int = encode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self ) -> Union[str, Any]:
_A , _A : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_A : Any = model_class(UpperCAmelCase__ )
_A : Dict = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_A : str = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
return model.decode(
decoder_input_ids=UpperCAmelCase__ , decoder_attention_mask=UpperCAmelCase__ , encoder_outputs=UpperCAmelCase__ , )
with self.subTest('''JIT Enabled''' ):
_A : int = decode_jitted(**UpperCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_A : Any = decode_jitted(**UpperCAmelCase__ ).to_tuple()
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
for jitted_output, output in zip(UpperCAmelCase__ , UpperCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self ) -> List[str]:
for model_class_name in self.all_model_classes:
_A : Any = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_A : Union[str, Any] = np.ones((1, 1) ) * model.config.eos_token_id
_A : Optional[Any] = model(UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
| 417 | 0 |
# flake8: noqa
# Lint as: python3
_lowerCAmelCase = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 10 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase_ = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 560 | 0 |
from __future__ import annotations
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
if len(_lowerCAmelCase ) <= 1 or n <= 1:
return
insert_next(_lowerCAmelCase ,n - 1 )
rec_insertion_sort(_lowerCAmelCase ,n - 1 )
def _lowerCAmelCase ( _lowerCAmelCase ,_lowerCAmelCase ):
'''simple docstring'''
if index >= len(_lowerCAmelCase ) or collection[index - 1] <= collection[index]:
return
# Swaps adjacent elements since they are not in ascending order
A_ , A_ : List[str] = (
collection[index],
collection[index - 1],
)
insert_next(_lowerCAmelCase ,index + 1 )
if __name__ == "__main__":
_lowerCAmelCase = input("""Enter integers separated by spaces: """)
_lowerCAmelCase = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 481 |
import datasets
from .evaluate import evaluate
_lowerCAmelCase = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_lowerCAmelCase = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_lowerCAmelCase = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
def _lowerCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def _lowerCamelCase ( self , a__ , a__ ):
A_ : Union[str, Any] = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A_ : Optional[Any] = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A_ : Union[str, Any] = evaluate(dataset=a__ , predictions=a__ )
return score
| 481 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : Optional[int] = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ : List[Any] = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
lowerCamelCase__ : Dict = {
"""distilbert-base-uncased""": 5_1_2,
"""distilbert-base-uncased-distilled-squad""": 5_1_2,
"""distilbert-base-cased""": 5_1_2,
"""distilbert-base-cased-distilled-squad""": 5_1_2,
"""distilbert-base-german-cased""": 5_1_2,
"""distilbert-base-multilingual-cased""": 5_1_2,
}
lowerCamelCase__ : List[Any] = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class _snake_case ( UpperCAmelCase_ ):
__lowerCAmelCase : Any = VOCAB_FILES_NAMES
__lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : int = ['input_ids', 'attention_mask']
__lowerCAmelCase : Optional[Any] = DistilBertTokenizer
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_="[UNK]" , SCREAMING_SNAKE_CASE_="[SEP]" , SCREAMING_SNAKE_CASE_="[PAD]" , SCREAMING_SNAKE_CASE_="[CLS]" , SCREAMING_SNAKE_CASE_="[MASK]" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
super().__init__(
SCREAMING_SNAKE_CASE_ , tokenizer_file=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , tokenize_chinese_chars=SCREAMING_SNAKE_CASE_ , strip_accents=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase__ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("""lowercase""" , SCREAMING_SNAKE_CASE_) != do_lower_case
or normalizer_state.get("""strip_accents""" , SCREAMING_SNAKE_CASE_) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , SCREAMING_SNAKE_CASE_) != tokenize_chinese_chars
):
lowercase__ : Tuple = getattr(SCREAMING_SNAKE_CASE_ , normalizer_state.pop("""type"""))
lowercase__ : int = do_lower_case
lowercase__ : str = strip_accents
lowercase__ : str = tokenize_chinese_chars
lowercase__ : int = normalizer_class(**SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = do_lower_case
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None):
'''simple docstring'''
lowercase__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
lowercase__ : int = [self.sep_token_id]
lowercase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None):
'''simple docstring'''
lowercase__ : Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_ , name=SCREAMING_SNAKE_CASE_)
return tuple(SCREAMING_SNAKE_CASE_)
| 12 | '''simple docstring'''
from numpy import exp, pi, sqrt
def __UpperCAmelCase ( a_: int, a_: float = 0.0, a_: float = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 494 | 0 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''configuration_gpt_neox''': ['''GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXConfig''']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Tuple = ['''GPTNeoXTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE : Any = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 149 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__SCREAMING_SNAKE_CASE : Optional[int] = TypeVar('''T''')
class __lowerCamelCase ( Generic[T] ):
"""simple docstring"""
a_: deque[T] # Cache store of keys
a_: set[T] # References of the keys in cache
a_: int = 10 # Maximum capacity of cache
def __init__( self : str , lowerCamelCase_ : int ):
_lowerCAmelCase =deque()
_lowerCAmelCase =set()
if not n:
_lowerCAmelCase =sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
_lowerCAmelCase =n
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase_ : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
_lowerCAmelCase =self.dq_store.pop()
self.key_reference.remove(lowerCamelCase_ )
else:
self.dq_store.remove(lowerCamelCase_ )
self.dq_store.appendleft(lowerCamelCase_ )
self.key_reference.add(lowerCamelCase_ )
def lowerCAmelCase__ ( self : Dict ):
for k in self.dq_store:
print(lowerCamelCase_ )
def __repr__( self : Optional[Any] ):
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE : LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 149 | 1 |
'''simple docstring'''
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_A = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = \"A Call for Clarity in Reporting {BLEU} Scores\",
author = \"Post, Matt\",
booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",
month = oct,
year = \"2018\",
address = \"Belgium, Brussels\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/W18-6319\",
pages = \"186--191\",
}
"""
_A = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""
_A = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=[\"About 95 species are currently accepted .\"]
>>> predictions=[\"About 95 you now get in .\"]
>>> references=[[\"About 95 species are currently known .\"]]
>>> wiki_split = datasets.load_metric(\"wiki_split\")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""
def A_ ( __SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]:
def remove_articles(__SCREAMING_SNAKE_CASE : Union[str, Any] ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE )
return re.sub(__SCREAMING_SNAKE_CASE , ''' ''' , __SCREAMING_SNAKE_CASE )
def white_space_fix(__SCREAMING_SNAKE_CASE : Tuple ):
return " ".join(text.split() )
def remove_punc(__SCREAMING_SNAKE_CASE : Tuple ):
__SCREAMING_SNAKE_CASE : int = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__SCREAMING_SNAKE_CASE : Dict ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__SCREAMING_SNAKE_CASE ) ) ) )
def A_ ( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
return int(normalize_answer(__SCREAMING_SNAKE_CASE ) == normalize_answer(__SCREAMING_SNAKE_CASE ) )
def A_ ( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> str:
__SCREAMING_SNAKE_CASE : Dict = [any(compute_exact(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for ref in refs ) for pred, refs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )]
return (sum(__SCREAMING_SNAKE_CASE ) / len(__SCREAMING_SNAKE_CASE )) * 1_00
def A_ ( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Optional[int] = [rgram for rgrams in rgramslist for rgram in rgrams]
__SCREAMING_SNAKE_CASE : int = Counter(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[str] = Counter(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Dict = Counter()
for sgram, scount in sgramcounter.items():
__SCREAMING_SNAKE_CASE : Tuple = scount * numref
__SCREAMING_SNAKE_CASE : List[str] = Counter(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Dict = Counter()
for cgram, ccount in cgramcounter.items():
__SCREAMING_SNAKE_CASE : List[Any] = ccount * numref
# KEEP
__SCREAMING_SNAKE_CASE : Union[str, Any] = sgramcounter_rep & cgramcounter_rep
__SCREAMING_SNAKE_CASE : Any = keepgramcounter_rep & rgramcounter
__SCREAMING_SNAKE_CASE : List[Any] = sgramcounter_rep & rgramcounter
__SCREAMING_SNAKE_CASE : Optional[Any] = 0
__SCREAMING_SNAKE_CASE : List[str] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__SCREAMING_SNAKE_CASE : Dict = 1
__SCREAMING_SNAKE_CASE : Dict = 1
if len(__SCREAMING_SNAKE_CASE ) > 0:
__SCREAMING_SNAKE_CASE : Tuple = keeptmpscorea / len(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
__SCREAMING_SNAKE_CASE : Dict = keeptmpscorea / sum(keepgramcounterall_rep.values() )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
if keepscore_precision > 0 or keepscore_recall > 0:
__SCREAMING_SNAKE_CASE : Optional[int] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
__SCREAMING_SNAKE_CASE : Tuple = sgramcounter_rep - cgramcounter_rep
__SCREAMING_SNAKE_CASE : List[Any] = delgramcounter_rep - rgramcounter
__SCREAMING_SNAKE_CASE : Dict = sgramcounter_rep - rgramcounter
__SCREAMING_SNAKE_CASE : str = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__SCREAMING_SNAKE_CASE : Optional[int] = 1
if len(__SCREAMING_SNAKE_CASE ) > 0:
__SCREAMING_SNAKE_CASE : Optional[int] = deltmpscorea / len(__SCREAMING_SNAKE_CASE )
# ADDITION
__SCREAMING_SNAKE_CASE : Optional[Any] = set(__SCREAMING_SNAKE_CASE ) - set(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : List[str] = set(__SCREAMING_SNAKE_CASE ) & set(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Optional[Any] = set(__SCREAMING_SNAKE_CASE ) - set(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
__SCREAMING_SNAKE_CASE : Optional[int] = 1
__SCREAMING_SNAKE_CASE : Any = 1
if len(__SCREAMING_SNAKE_CASE ) > 0:
__SCREAMING_SNAKE_CASE : Optional[int] = addtmpscore / len(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
__SCREAMING_SNAKE_CASE : Dict = addtmpscore / len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : int = 0
if addscore_precision > 0 or addscore_recall > 0:
__SCREAMING_SNAKE_CASE : int = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def A_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE : Union[str, Any] = len(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : int = ssent.split(''' ''' )
__SCREAMING_SNAKE_CASE : Any = csent.split(''' ''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Dict = []
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : List[str] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Optional[int] = []
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = []
for rsent in rsents:
__SCREAMING_SNAKE_CASE : Dict = rsent.split(''' ''' )
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : str = []
ragramslist.append(__SCREAMING_SNAKE_CASE )
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(__SCREAMING_SNAKE_CASE ) - 1:
__SCREAMING_SNAKE_CASE : Dict = ragrams[i] + ''' ''' + ragrams[i + 1]
ragrams.append(__SCREAMING_SNAKE_CASE )
if i < len(__SCREAMING_SNAKE_CASE ) - 2:
__SCREAMING_SNAKE_CASE : int = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2]
ragrams.append(__SCREAMING_SNAKE_CASE )
if i < len(__SCREAMING_SNAKE_CASE ) - 3:
__SCREAMING_SNAKE_CASE : Optional[int] = ragrams[i] + ''' ''' + ragrams[i + 1] + ''' ''' + ragrams[i + 2] + ''' ''' + ragrams[i + 3]
ragrams.append(__SCREAMING_SNAKE_CASE )
ragramslist.append(__SCREAMING_SNAKE_CASE )
ragramslist.append(__SCREAMING_SNAKE_CASE )
ragramslist.append(__SCREAMING_SNAKE_CASE )
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(__SCREAMING_SNAKE_CASE ) - 1:
__SCREAMING_SNAKE_CASE : int = sagrams[i] + ''' ''' + sagrams[i + 1]
sagrams.append(__SCREAMING_SNAKE_CASE )
if i < len(__SCREAMING_SNAKE_CASE ) - 2:
__SCREAMING_SNAKE_CASE : List[Any] = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2]
sagrams.append(__SCREAMING_SNAKE_CASE )
if i < len(__SCREAMING_SNAKE_CASE ) - 3:
__SCREAMING_SNAKE_CASE : int = sagrams[i] + ''' ''' + sagrams[i + 1] + ''' ''' + sagrams[i + 2] + ''' ''' + sagrams[i + 3]
sagrams.append(__SCREAMING_SNAKE_CASE )
for i in range(0 , len(__SCREAMING_SNAKE_CASE ) - 1 ):
if i < len(__SCREAMING_SNAKE_CASE ) - 1:
__SCREAMING_SNAKE_CASE : Union[str, Any] = cagrams[i] + ''' ''' + cagrams[i + 1]
cagrams.append(__SCREAMING_SNAKE_CASE )
if i < len(__SCREAMING_SNAKE_CASE ) - 2:
__SCREAMING_SNAKE_CASE : List[str] = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2]
cagrams.append(__SCREAMING_SNAKE_CASE )
if i < len(__SCREAMING_SNAKE_CASE ) - 3:
__SCREAMING_SNAKE_CASE : int = cagrams[i] + ''' ''' + cagrams[i + 1] + ''' ''' + cagrams[i + 2] + ''' ''' + cagrams[i + 3]
cagrams.append(__SCREAMING_SNAKE_CASE )
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : Optional[Any] = SARIngram(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : List[Any] = SARIngram(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : int = SARIngram(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
((__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE) , (__SCREAMING_SNAKE_CASE)) : List[Any] = SARIngram(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
__SCREAMING_SNAKE_CASE : int = sum([delascore, delascore, delascore, delascore] ) / 4
__SCREAMING_SNAKE_CASE : Optional[Any] = sum([addascore, addascore, addascore, addascore] ) / 4
__SCREAMING_SNAKE_CASE : int = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
def A_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : str = "13a" , __SCREAMING_SNAKE_CASE : bool = True ) -> List[Any]:
# Normalization is requried for the ASSET dataset (one of the primary
# datasets in sentence simplification) to allow using space
# to split the sentence. Even though Wiki-Auto and TURK datasets,
# do not require normalization, we do it for consistency.
# Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
if lowercase:
__SCREAMING_SNAKE_CASE : Union[str, Any] = sentence.lower()
if tokenizer in ["13a", "intl"]:
if version.parse(sacrebleu.__version__ ).major >= 2:
__SCREAMING_SNAKE_CASE : int = sacrebleu.metrics.bleu._get_tokenizer(__SCREAMING_SNAKE_CASE )()(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE : str = sacrebleu.TOKENIZERS[tokenizer]()(__SCREAMING_SNAKE_CASE )
elif tokenizer == "moses":
__SCREAMING_SNAKE_CASE : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(__SCREAMING_SNAKE_CASE , return_str=__SCREAMING_SNAKE_CASE , escape=__SCREAMING_SNAKE_CASE )
elif tokenizer == "penn":
__SCREAMING_SNAKE_CASE : Union[str, Any] = sacremoses.MosesTokenizer().penn_tokenize(__SCREAMING_SNAKE_CASE , return_str=__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = sentence
if not return_str:
__SCREAMING_SNAKE_CASE : List[str] = normalized_sent.split()
return normalized_sent
def A_ ( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any ) -> int:
if not (len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE )):
raise ValueError('''Sources length must match predictions and references lengths.''' )
__SCREAMING_SNAKE_CASE : List[Any] = 0
for src, pred, refs in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
sari_score += SARIsent(normalize(__SCREAMING_SNAKE_CASE ) , normalize(__SCREAMING_SNAKE_CASE ) , [normalize(__SCREAMING_SNAKE_CASE ) for sent in refs] )
__SCREAMING_SNAKE_CASE : Union[str, Any] = sari_score / len(__SCREAMING_SNAKE_CASE )
return 1_00 * sari_score
def A_ ( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict="exp" , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=False , ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : int = len(references[0] )
if any(len(__SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = [[refs[i] for refs in references] for i in range(__SCREAMING_SNAKE_CASE )]
__SCREAMING_SNAKE_CASE : str = sacrebleu.corpus_bleu(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , smooth_method=__SCREAMING_SNAKE_CASE , smooth_value=__SCREAMING_SNAKE_CASE , force=__SCREAMING_SNAKE_CASE , lowercase=__SCREAMING_SNAKE_CASE , use_effective_order=__SCREAMING_SNAKE_CASE , )
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
def _snake_case ( self ) -> Optional[Any]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=[
'''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
'''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
'''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
'''https://github.com/mjpost/sacreBLEU''',
] , reference_urls=[
'''https://www.aclweb.org/anthology/Q16-1029.pdf''',
'''https://github.com/mjpost/sacreBLEU''',
'''https://en.wikipedia.org/wiki/BLEU''',
'''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
] , )
def _snake_case ( self , lowercase , lowercase , lowercase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = {}
result.update({'''sari''': compute_sari(sources=lowercase , predictions=lowercase , references=lowercase )} )
result.update({'''sacrebleu''': compute_sacrebleu(predictions=lowercase , references=lowercase )} )
result.update({'''exact''': compute_em(predictions=lowercase , references=lowercase )} )
return result
| 158 |
'''simple docstring'''
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def A_ ( __SCREAMING_SNAKE_CASE : ndarray ) -> float:
return np.dot(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE_ :
def __init__( self , *,
lowercase = np.inf , lowercase = "linear" , lowercase = 0.0 , ) -> None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = regularization
__SCREAMING_SNAKE_CASE : List[str] = gamma
if kernel == "linear":
__SCREAMING_SNAKE_CASE : str = self.__linear
elif kernel == "rbf":
if self.gamma == 0:
raise ValueError('''rbf kernel requires gamma''' )
if not isinstance(self.gamma , (float, int) ):
raise ValueError('''gamma must be float or int''' )
if not self.gamma > 0:
raise ValueError('''gamma must be > 0''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.__rbf
# in the future, there could be a default value like in sklearn
# sklear: def_gamma = 1/(n_features * X.var()) (wiki)
# previously it was 1/(n_features)
else:
__SCREAMING_SNAKE_CASE : str = f"""Unknown kernel: {kernel}"""
raise ValueError(lowercase )
def _snake_case ( self , lowercase , lowercase ) -> float:
'''simple docstring'''
return np.dot(lowercase , lowercase )
def _snake_case ( self , lowercase , lowercase ) -> float:
'''simple docstring'''
return np.exp(-(self.gamma * norm_squared(vectora - vectora )) )
def _snake_case ( self , lowercase , lowercase ) -> None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[int] = observations
__SCREAMING_SNAKE_CASE : str = classes
# using Wolfe's Dual to calculate w.
# Primal problem: minimize 1/2*norm_squared(w)
# constraint: yn(w . xn + b) >= 1
#
# With l a vector
# Dual problem: maximize sum_n(ln) -
# 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
# constraint: self.C >= ln >= 0
# and sum_n(ln*yn) = 0
# Then we get w using w = sum_n(ln*yn*xn)
# At the end we can get b ~= mean(yn - w . xn)
#
# Since we use kernels, we only need l_star to calculate b
# and to classify observations
((__SCREAMING_SNAKE_CASE) , ) : int = np.shape(lowercase )
def to_minimize(lowercase ) -> float:
__SCREAMING_SNAKE_CASE : Dict = 0
((__SCREAMING_SNAKE_CASE) , ) : Optional[Any] = np.shape(lowercase )
for i in range(lowercase ):
for j in range(lowercase ):
s += (
candidate[i]
* candidate[j]
* classes[i]
* classes[j]
* self.kernel(observations[i] , observations[j] )
)
return 1 / 2 * s - sum(lowercase )
__SCREAMING_SNAKE_CASE : str = LinearConstraint(lowercase , 0 , 0 )
__SCREAMING_SNAKE_CASE : Dict = Bounds(0 , self.regularization )
__SCREAMING_SNAKE_CASE : Optional[int] = minimize(
lowercase , np.ones(lowercase ) , bounds=lowercase , constraints=[ly_contraint] ).x
__SCREAMING_SNAKE_CASE : str = l_star
# calculating mean offset of separation plane to points
__SCREAMING_SNAKE_CASE : Any = 0
for i in range(lowercase ):
for j in range(lowercase ):
s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
observations[i] , observations[j] )
__SCREAMING_SNAKE_CASE : Tuple = s / n
def _snake_case ( self , lowercase ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = sum(
self.optimum[n]
* self.classes[n]
* self.kernel(self.observations[n] , lowercase )
for n in range(len(self.classes ) ) )
return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ (__lowercase ):
def __init__( self , _a , _a ) -> List[str]:
super().__init__()
self.register_modules(unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = 1 , _a = 100 , _a = None , _a = None , _a = True , ) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
lowerCAmelCase_ = self.unet.config.sample_size / self.unet.config.sample_rate
lowerCAmelCase_ = audio_length_in_s * self.unet.config.sample_rate
lowerCAmelCase_ = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
f" {3 * down_scale_factor / self.unet.config.sample_rate}." )
lowerCAmelCase_ = int(_a )
if sample_size % down_scale_factor != 0:
lowerCAmelCase_ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
" process." )
lowerCAmelCase_ = int(_a )
lowerCAmelCase_ = next(iter(self.unet.parameters() ) ).dtype
lowerCAmelCase_ = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(_a , _a ) and len(_a ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_a )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
lowerCAmelCase_ = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
# set step values
self.scheduler.set_timesteps(_a , device=audio.device )
lowerCAmelCase_ = self.scheduler.timesteps.to(_a )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCAmelCase_ = self.unet(_a , _a ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCAmelCase_ = self.scheduler.step(_a , _a , _a ).prev_sample
lowerCAmelCase_ = audio.clamp(-1 , 1 ).float().cpu().numpy()
lowerCAmelCase_ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=_a )
| 716 |
import argparse
import os
import re
lowerCamelCase__ = '''src/transformers'''
# Pattern that looks at the indentation in a line.
lowerCamelCase__ = re.compile(R'''^(\s*)\S''')
# Pattern that matches `"key":" and puts `key` in group 0.
lowerCamelCase__ = re.compile(R'''^\s*"([^"]+)":''')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowerCamelCase__ = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''')
# Pattern that matches `"key",` and puts `key` in group 0.
lowerCamelCase__ = re.compile(R'''^\s*"([^"]+)",\s*$''')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowerCamelCase__ = re.compile(R'''\[([^\]]+)\]''')
def A(__a: List[Any] ):
lowerCAmelCase_ = _re_indent.search(__a )
return "" if search is None else search.groups()[0]
def A(__a: Optional[Any] , __a: Optional[Any]="" , __a: Optional[int]=None , __a: Optional[int]=None ):
lowerCAmelCase_ = 0
lowerCAmelCase_ = code.split("\n" )
if start_prompt is not None:
while not lines[index].startswith(__a ):
index += 1
lowerCAmelCase_ = ["\n".join(lines[:index] )]
else:
lowerCAmelCase_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase_ = [lines[index]]
index += 1
while index < len(__a ) and (end_prompt is None or not lines[index].startswith(__a )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(__a ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + " " ):
current_block.append(lines[index] )
blocks.append("\n".join(__a ) )
if index < len(__a ) - 1:
lowerCAmelCase_ = [lines[index + 1]]
index += 1
else:
lowerCAmelCase_ = []
else:
blocks.append("\n".join(__a ) )
lowerCAmelCase_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(__a ) > 0:
blocks.append("\n".join(__a ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(__a ):
blocks.append("\n".join(lines[index:] ) )
return blocks
def A(__a: Tuple ):
def _inner(__a: Optional[int] ):
return key(__a ).lower().replace("_" , "" )
return _inner
def A(__a: str , __a: Optional[Any]=None ):
# If no key is provided, we use a noop.
def noop(__a: List[Any] ):
return x
if key is None:
lowerCAmelCase_ = noop
# Constants are all uppercase, they go first.
lowerCAmelCase_ = [obj for obj in objects if key(__a ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase_ = [obj for obj in objects if key(__a )[0].isupper() and not key(__a ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase_ = [obj for obj in objects if not key(__a )[0].isupper()]
lowerCAmelCase_ = ignore_underscore(__a )
return sorted(__a , key=__a ) + sorted(__a , key=__a ) + sorted(__a , key=__a )
def A(__a: Dict ):
# This inner function sort imports between [ ].
def _replace(__a: Any ):
lowerCAmelCase_ = match.groups()[0]
if "," not in imports:
return F"[{imports}]"
lowerCAmelCase_ = [part.strip().replace("\"" , "" ) for part in imports.split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase_ = keys[:-1]
return "[" + ", ".join([F"\"{k}\"" for k in sort_objects(__a )] ) + "]"
lowerCAmelCase_ = import_statement.split("\n" )
if len(__a ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase_ = 2 if lines[1].strip() == "[" else 1
lowerCAmelCase_ = [(i, _re_strip_line.search(__a ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase_ = sort_objects(__a , key=lambda __a : x[1] )
lowerCAmelCase_ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(__a ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase_ = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase_ = [part.strip().replace("\"" , "" ) for part in lines[1].split("," )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase_ = keys[:-1]
lowerCAmelCase_ = get_indent(lines[1] ) + ", ".join([F"\"{k}\"" for k in sort_objects(__a )] )
return "\n".join(__a )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase_ = _re_bracket_content.sub(_replace , __a )
return import_statement
def A(__a: Union[str, Any] , __a: str=True ):
with open(__a , encoding="utf-8" ) as f:
lowerCAmelCase_ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase_ = split_code_in_indented_blocks(
__a , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:" )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(__a ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase_ = main_blocks[block_idx]
lowerCAmelCase_ = block.split("\n" )
# Get to the start of the imports.
lowerCAmelCase_ = 0
while line_idx < len(__a ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase_ = len(__a )
else:
line_idx += 1
if line_idx >= len(__a ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase_ = "\n".join(block_lines[line_idx:-1] )
lowerCAmelCase_ = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowerCAmelCase_ = split_code_in_indented_blocks(__a , indent_level=__a )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase_ = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase_ = [(pattern.search(__a ).groups()[0] if pattern.search(__a ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase_ = [(i, key) for i, key in enumerate(__a ) if key is not None]
lowerCAmelCase_ = [x[0] for x in sorted(__a , key=lambda __a : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase_ = 0
lowerCAmelCase_ = []
for i in range(len(__a ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase_ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(__a )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase_ = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(__a ):
if check_only:
return True
else:
print(F"Overwriting {file}." )
with open(__a , "w" , encoding="utf-8" ) as f:
f.write("\n".join(__a ) )
def A(__a: Any=True ):
lowerCAmelCase_ = []
for root, _, files in os.walk(__a ):
if "__init__.py" in files:
lowerCAmelCase_ = sort_imports(os.path.join(__a , "__init__.py" ) , check_only=__a )
if result:
lowerCAmelCase_ = [os.path.join(__a , "__init__.py" )]
if len(__a ) > 0:
raise ValueError(F"Would overwrite {len(__a )} files, run `make style`." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
lowerCamelCase__ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 226 | 0 |
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCAmelCase = logging.get_logger(__name__)
def __A ( a_ : Optional[int] ,a_ : Any ,a_ : List[str] ,a_ : Any=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
lowerCAmelCase : Union[str, Any] = os.path.abspath(a_ )
logger.info(f'''Loading PyTorch weights from {pt_path}''' )
lowerCAmelCase : List[Any] = torch.load(a_ ,map_location="cpu" )
logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
lowerCAmelCase : Optional[int] = convert_pytorch_state_dict_to_flax(a_ ,a_ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
lowerCAmelCase : Optional[int] = convert_pytorch_sharded_state_dict_to_flax(a_ ,a_ )
return flax_state_dict
def __A ( a_ : Tuple[str] ,a_ : np.ndarray ,a_ : Dict[str, jnp.ndarray] ,a_ : str ,):
def is_key_or_prefix_key_in_dict(a_ : Tuple[str] ) -> bool:
return len(set(a_ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowerCAmelCase : int = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(a_ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCAmelCase : int = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(a_ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCAmelCase : Optional[Any] = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(a_ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCAmelCase : List[Any] = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(a_ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCAmelCase : Any = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(a_ ):
lowerCAmelCase : Tuple = pt_tensor.transpose(2 ,3 ,1 ,0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCAmelCase : Any = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(a_ ):
lowerCAmelCase : int = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCAmelCase : Tuple = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowerCAmelCase : Optional[Any] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCAmelCase : int = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCAmelCase : List[Any] = pt_tuple_key[-2] + "_v"
if name is not None:
lowerCAmelCase : Any = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __A ( a_ : Any ,a_ : Tuple ):
# convert pytorch tensor to numpy
lowerCAmelCase : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCAmelCase : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCAmelCase : Any = flax_model.params["params"]
else:
lowerCAmelCase : Tuple = flax_model.params
lowerCAmelCase : int = flatten_dict(a_ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCAmelCase : List[Any] = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(a_ )
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : List[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
lowerCAmelCase : Union[str, Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase : Tuple = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
lowerCAmelCase : str = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase : str = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCAmelCase , lowerCAmelCase : List[str] = rename_key_and_reshape_tensor(
a_ ,a_ ,a_ ,a_ )
# add model prefix if necessary
lowerCAmelCase : List[str] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase : Any = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCAmelCase : Union[str, Any] = jnp.asarray(a_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(a_ ,a_ )
continue
# also add unexpected weight so that warning is thrown
lowerCAmelCase : Dict = jnp.asarray(a_ )
else:
# also add unexpected weight so that warning is thrown
lowerCAmelCase : str = jnp.asarray(a_ )
return unflatten_dict(a_ )
def __A ( a_ : str ,a_ : str ):
import torch
# Load the index
lowerCAmelCase : Dict = {}
for shard_file in shard_filenames:
# load using msgpack utils
lowerCAmelCase : List[str] = torch.load(a_ )
lowerCAmelCase : str = {k: v.numpy() for k, v in pt_state_dict.items()}
lowerCAmelCase : Any = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCAmelCase : Any = flax_model.params["params"]
lowerCAmelCase : List[str] = flatten_dict(a_ )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
lowerCAmelCase : List[str] = flax_model.params
lowerCAmelCase : Any = flatten_dict(a_ )
lowerCAmelCase : Union[str, Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
lowerCAmelCase : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCAmelCase : str = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
lowerCAmelCase : Tuple = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase : Any = pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCAmelCase , lowerCAmelCase : str = rename_key_and_reshape_tensor(
a_ ,a_ ,a_ ,a_ )
# add model prefix if necessary
lowerCAmelCase : List[str] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase : Tuple = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCAmelCase : Optional[int] = jnp.asarray(a_ )
continue
if "var" in flax_key[-1]:
lowerCAmelCase : int = jnp.asarray(a_ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(a_ ,a_ )
continue
# also add unexpected weight so that warning is thrown
lowerCAmelCase : Union[str, Any] = jnp.asarray(a_ )
else:
# also add unexpected weight so that warning is thrown
lowerCAmelCase : Union[str, Any] = jnp.asarray(a_ )
return unflatten_dict(a_ )
def __A ( a_ : Optional[Any] ,a_ : Tuple ):
lowerCAmelCase : Dict = os.path.abspath(a_ )
logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
lowerCAmelCase : Optional[Any] = getattr(a_ ,"Flax" + model.__class__.__name__ )
# load flax weight dict
with open(a_ ,"rb" ) as state_f:
try:
lowerCAmelCase : List[Any] = from_bytes(a_ ,state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(a_ ,a_ )
def __A ( a_ : Dict ,a_ : List[str] ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
lowerCAmelCase : Any = flatten_dict(jax.tree_util.tree_map(lambda a_ : x.dtype == jnp.bfloataa ,a_ ) ).values()
if any(a_ ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
lowerCAmelCase : Union[str, Any] = jax.tree_util.tree_map(
lambda a_ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params ,a_ )
lowerCAmelCase : Optional[Any] = flatten_dict(a_ )
lowerCAmelCase : Dict = pt_model.state_dict()
lowerCAmelCase : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
lowerCAmelCase : Tuple = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCAmelCase : int = []
lowerCAmelCase : Optional[Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCAmelCase : List[str] = flax_key_tuple[0] == pt_model.base_model_prefix
lowerCAmelCase : Any = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCAmelCase : Optional[int] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCAmelCase : str = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(a_ ) not in pt_model_dict:
# conv layer
lowerCAmelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
lowerCAmelCase : List[Any] = jnp.transpose(a_ ,(3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(a_ ) not in pt_model_dict:
# linear layer
lowerCAmelCase : Optional[int] = flax_key_tuple[:-1] + ("weight",)
lowerCAmelCase : Optional[Any] = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCAmelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCAmelCase : Any = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
lowerCAmelCase : int = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
lowerCAmelCase : Any = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCAmelCase : Union[str, Any] = ".".join(a_ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCAmelCase : Tuple = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCAmelCase : Optional[Any] = key.split("." )
lowerCAmelCase : Optional[int] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCAmelCase : str = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCAmelCase : Optional[Any] = key_components[-2] + "_v"
if name is not None:
lowerCAmelCase : Optional[int] = key_components[:-3] + [name]
lowerCAmelCase : List[Any] = ".".join(a_ )
lowerCAmelCase : List[str] = key
if flax_key in special_pt_names:
lowerCAmelCase : Optional[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
lowerCAmelCase : Optional[Any] = np.asarray(a_ ) if not isinstance(a_ ,np.ndarray ) else flax_tensor
lowerCAmelCase : Tuple = torch.from_numpy(a_ )
# remove from missing keys
missing_keys.remove(a_ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(a_ )
pt_model.load_state_dict(a_ )
# re-transform missing_keys to list
lowerCAmelCase : str = list(a_ )
if len(a_ ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(a_ ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
" use it for predictions and inference." )
else:
logger.warning(
f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
"If your task is similar to the task the model of the checkpoint was trained on, "
f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
| 525 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : int )-> int:
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_lowerCamelCase = 1
_lowerCamelCase = 1
while repunit:
_lowerCamelCase = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def SCREAMING_SNAKE_CASE_ ( snake_case : int = 1_000_000 )-> int:
_lowerCamelCase = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(snake_case ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f'{solution() = }')
| 650 | 0 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.6_0_9_3_4_4,
"knot": 1.8_5_2,
}
SCREAMING_SNAKE_CASE_ = {
"km/h": 1.0,
"m/s": 0.2_7_7_7_7_7_7_7_8,
"mph": 0.6_2_1_3_7_1_1_9_2,
"knot": 0.5_3_9_9_5_6_8_0_3,
}
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__lowerCAmelCase = (
f"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
f"""Valid values are: {", ".join(_lowerCAmelCase )}"""
)
raise ValueError(_lowerCAmelCase )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 573 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=32 , snake_case_=3 , snake_case_=4 , snake_case_=[10, 20, 30, 40] , snake_case_=[2, 2, 3, 2] , snake_case_=True , snake_case_=True , snake_case_=37 , snake_case_="gelu" , snake_case_=10 , snake_case_=0.02 , snake_case_=["stage2", "stage3", "stage4"] , snake_case_=[2, 3, 4] , snake_case_=None , ) -> List[str]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = num_stages
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = intermediate_size
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = initializer_range
__lowerCAmelCase = out_features
__lowerCAmelCase = out_indices
__lowerCAmelCase = scope
def A__ ( self ) -> List[Any]:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> List[Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=snake_case_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Optional[int]:
__lowerCAmelCase = ConvNextModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(snake_case_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
__lowerCAmelCase = ConvNextForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
__lowerCAmelCase = ConvNextBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(snake_case_ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__lowerCAmelCase = None
__lowerCAmelCase = ConvNextBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__lowerCAmelCase = model(snake_case_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self ) -> List[str]:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
_snake_case = (
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
_snake_case = True
_snake_case = False
_snake_case = False
_snake_case = False
_snake_case = False
def A__ ( self ) -> int:
__lowerCAmelCase = ConvNextModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=37 )
def A__ ( self ) -> int:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> str:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def A__ ( self ) -> str:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def A__ ( self ) -> List[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def A__ ( self ) -> Optional[int]:
pass
def A__ ( self ) -> List[str]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(snake_case_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def A__ ( self ) -> Dict:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case_ )
def A__ ( self ) -> Union[str, Any]:
def check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ ):
__lowerCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(snake_case_ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def A__ ( self ) -> str:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def A__ ( self ) -> str:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = ConvNextModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowercase ():
__lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ) -> Any:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def A__ ( self ) -> Any:
__lowerCAmelCase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(snake_case_ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**snake_case_ )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case_ )
__lowerCAmelCase = torch.tensor([-0.0_260, -0.4_739, 0.1_911] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase , A__ ):
'''simple docstring'''
_snake_case = (ConvNextBackbone,) if is_torch_available() else ()
_snake_case = ConvNextConfig
_snake_case = False
def A__ ( self ) -> Dict:
__lowerCAmelCase = ConvNextModelTester(self )
| 573 | 1 |
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( lowerCamelCase_: str ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 449 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
A = False
class _a ( unittest.TestCase):
def __lowercase ( self : str ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowercase ( self : List[str] ) -> Any:
return 12
@property
def __lowercase ( self : List[str] ) -> Dict:
return 12
@property
def __lowercase ( self : Optional[int] ) -> Union[str, Any]:
return 32
@property
def __lowercase ( self : int ) -> Dict:
torch.manual_seed(0 )
snake_case : List[str] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def __lowercase ( self : str ) -> List[Any]:
snake_case : Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def __lowercase ( self : Union[str, Any] ) -> Any:
torch.manual_seed(0 )
snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(_lowercase )
@property
def __lowercase ( self : Optional[int] ) -> Optional[int]:
torch.manual_seed(0 )
snake_case : List[Any] = 12
snake_case : Dict = 12
snake_case : Tuple = {
"attention_bias": True,
"cross_attention_dim": 32,
"attention_head_dim": height * width,
"num_attention_heads": 1,
"num_vector_embeds": self.num_embed,
"num_embeds_ada_norm": self.num_embeds_ada_norm,
"norm_num_groups": 32,
"sample_size": width,
"activation_fn": "geglu-approximate",
}
snake_case : Dict = TransformeraDModel(**_lowercase )
return model
def __lowercase ( self : Optional[int] ) -> Tuple:
snake_case : Optional[Any] = "cpu"
snake_case : Optional[int] = self.dummy_vqvae
snake_case : Dict = self.dummy_text_encoder
snake_case : Tuple = self.dummy_tokenizer
snake_case : List[Any] = self.dummy_transformer
snake_case : List[Any] = VQDiffusionScheduler(self.num_embed )
snake_case : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
snake_case : Dict = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case : Any = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case : Optional[Any] = "teddy bear playing in the pool"
snake_case : Tuple = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Tuple = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" )
snake_case : Optional[int] = output.images
snake_case : int = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : List[str] = pipe(
[prompt] , generator=_lowercase , output_type="np" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case : List[Any] = image[0, -3:, -3:, -1]
snake_case : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case : str = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowercase ( self : Union[str, Any] ) -> Optional[int]:
snake_case : List[str] = "cpu"
snake_case : Dict = self.dummy_vqvae
snake_case : List[Any] = self.dummy_text_encoder
snake_case : Optional[Any] = self.dummy_tokenizer
snake_case : int = self.dummy_transformer
snake_case : str = VQDiffusionScheduler(self.num_embed )
snake_case : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(
learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
snake_case : List[str] = VQDiffusionPipeline(
vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
snake_case : Optional[Any] = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
snake_case : Dict = "teddy bear playing in the pool"
snake_case : Union[str, Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Union[str, Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" )
snake_case : Optional[Any] = output.images
snake_case : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Union[str, Any] = pipe(
[prompt] , generator=_lowercase , output_type="np" , return_dict=_lowercase , num_inference_steps=2 )[0]
snake_case : Any = image[0, -3:, -3:, -1]
snake_case : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
snake_case : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase):
def __lowercase ( self : Optional[int] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Dict ) -> Tuple:
snake_case : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
snake_case : Tuple = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
snake_case : Union[str, Any] = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
snake_case : Tuple = torch.Generator(device=_lowercase ).manual_seed(0 )
snake_case : Optional[int] = pipeline(
"teddy bear playing in the pool" , num_images_per_prompt=1 , generator=_lowercase , output_type="np" , )
snake_case : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 449 | 1 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = "owlvit_text_model"
def __init__( self : Tuple , lowerCamelCase__ : List[Any]=4_94_08 , lowerCamelCase__ : Optional[Any]=5_12 , lowerCamelCase__ : Optional[int]=20_48 , lowerCamelCase__ : Any=12 , lowerCamelCase__ : List[str]=8 , lowerCamelCase__ : Dict=16 , lowerCamelCase__ : Tuple="quick_gelu" , lowerCamelCase__ : Dict=1E-5 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Tuple=1.0 , lowerCamelCase__ : List[Any]=0 , lowerCamelCase__ : Optional[int]=4_94_06 , lowerCamelCase__ : Tuple=4_94_07 , **lowerCamelCase__ : Optional[int] , ) ->int:
'''simple docstring'''
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : Optional[int] = intermediate_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Dict = max_position_embeddings
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : Optional[Any] = layer_norm_eps
_UpperCAmelCase : Any = attention_dropout
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[Any] = initializer_factor
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase : Tuple = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
_UpperCAmelCase : str = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : int = "owlvit_vision_model"
def __init__( self : List[str] , lowerCamelCase__ : Tuple=7_68 , lowerCamelCase__ : str=30_72 , lowerCamelCase__ : List[str]=12 , lowerCamelCase__ : List[Any]=12 , lowerCamelCase__ : Any=3 , lowerCamelCase__ : Optional[Any]=7_68 , lowerCamelCase__ : str=32 , lowerCamelCase__ : Optional[int]="quick_gelu" , lowerCamelCase__ : List[Any]=1E-5 , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : int=0.0_2 , lowerCamelCase__ : Any=1.0 , **lowerCamelCase__ : Optional[int] , ) ->List[Any]:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : int = num_channels
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : Optional[Any] = patch_size
_UpperCAmelCase : List[str] = hidden_act
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Union[str, Any] = attention_dropout
_UpperCAmelCase : str = initializer_range
_UpperCAmelCase : Any = initializer_factor
@classmethod
def lowerCAmelCase__ ( cls : Tuple , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("model_type" ) == "owlvit":
_UpperCAmelCase : List[Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = "owlvit"
lowerCAmelCase : Union[str, Any] = True
def __init__( self : int , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Optional[Any]=5_12 , lowerCamelCase__ : List[Any]=2.6_5_9_2 , lowerCamelCase__ : Any=True , **lowerCamelCase__ : Any , ) ->List[str]:
'''simple docstring'''
super().__init__(**_SCREAMING_SNAKE_CASE )
if text_config is None:
_UpperCAmelCase : List[str] = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
_UpperCAmelCase : Dict = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
_UpperCAmelCase : Union[str, Any] = OwlViTTextConfig(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[Any] = OwlViTVisionConfig(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : int = projection_dim
_UpperCAmelCase : int = logit_scale_init_value
_UpperCAmelCase : str = return_dict
_UpperCAmelCase : Dict = 1.0
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , lowerCamelCase__ : str , **lowerCamelCase__ : List[str] ) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase : int = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , **lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = {}
_UpperCAmelCase : int = text_config
_UpperCAmelCase : List[str] = vision_config
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Any = self.text_config.to_dict()
_UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
_UpperCAmelCase : Dict = self.__class__.model_type
return output
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def lowerCAmelCase__ ( self : List[str] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self : Tuple ) ->float:
'''simple docstring'''
return 1E-4
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int = -1 , lowerCamelCase__ : str = -1 , lowerCamelCase__ : int = None , ) ->Mapping[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : str = super().generate_dummy_inputs(
processor.image_processor , batch_size=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
return {**text_input_dict, **image_input_dict}
@property
def lowerCAmelCase__ ( self : Any ) ->int:
'''simple docstring'''
return 14
| 713 |
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ) ) )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if dataset.ndim != value_array.ndim:
_UpperCAmelCase : Optional[Any] = (
"Wrong input data's dimensions... "
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__lowerCAmelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCAmelCase : Optional[int] = (
"Wrong input data's shape... "
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__lowerCAmelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
_UpperCAmelCase : Union[str, Any] = (
"Input data have different datatype... "
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = []
for value in value_array:
_UpperCAmelCase : List[str] = euclidean(__lowerCAmelCase , dataset[0] )
_UpperCAmelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCAmelCase : int = euclidean(__lowerCAmelCase , __lowerCAmelCase )
if dist > temp_dist:
_UpperCAmelCase : Tuple = temp_dist
_UpperCAmelCase : Dict = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return np.dot(__lowerCAmelCase , __lowerCAmelCase ) / (norm(__lowerCAmelCase ) * norm(__lowerCAmelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCamelCase_ = logging.getLogger(__name__)
lowerCamelCase_ = "pytorch_model.bin"
@dataclasses.dataclass
class __a :
"""simple docstring"""
_A : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
_A : Optional[str] = dataclasses.field(
default=__lowerCamelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class __a :
"""simple docstring"""
_A : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
_A : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
_A : Optional[str] = dataclasses.field(
default=__lowerCamelCase , metadata={"help": "A csv or a json file containing the validation data."} )
_A : Optional[str] = dataclasses.field(
default=__lowerCamelCase , metadata={"help": "The name of the task to train on."} , )
_A : Optional[List[str]] = dataclasses.field(
default=__lowerCamelCase , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class __a :
"""simple docstring"""
_A : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
_A : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
_A : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
_A : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
_A : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
_A : Optional[bool] = dataclasses.field(
default=__lowerCamelCase , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
_A : Optional[bool] = dataclasses.field(
default=__lowerCamelCase , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
_A : Optional[bool] = dataclasses.field(
default=__lowerCamelCase , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
_A : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
_A : Optional[int] = dataclasses.field(
default=1_00 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
_A : Optional[int] = dataclasses.field(
default=__lowerCamelCase , metadata={"help": "Random seed for initialization."} , )
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =datasets.concatenate_datasets([infer_input, infer_output], axis=1 )
if args.do_filter_by_confidence:
SCREAMING_SNAKE_CASE__ =dataset.filter(lambda __UpperCamelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
SCREAMING_SNAKE_CASE__ =int(eval_result * len(__UpperCamelCase ) )
print(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =dataset.sort("""probability""", reverse=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =dataset.select(range(__UpperCamelCase ) )
SCREAMING_SNAKE_CASE__ =dataset.remove_columns(["""label""", """probability"""] )
SCREAMING_SNAKE_CASE__ =dataset.rename_column("""prediction""", """label""" )
SCREAMING_SNAKE_CASE__ =dataset.map(lambda __UpperCamelCase : {"label": idalabel[example["label"]]} )
SCREAMING_SNAKE_CASE__ =dataset.shuffle(seed=args.seed )
SCREAMING_SNAKE_CASE__ =os.path.join(__UpperCamelCase, f"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(__UpperCamelCase, index=__UpperCamelCase )
else:
dataset.to_json(__UpperCamelCase )
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, **__UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
SCREAMING_SNAKE_CASE__ =STModelArguments(model_name_or_path=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =STDataArguments(train_file=__UpperCamelCase, infer_file=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =STTrainingArguments(output_dir=__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(__UpperCamelCase ).items():
setattr(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
for key, value in kwargs.items():
if hasattr(__UpperCamelCase, __UpperCamelCase ):
setattr(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
# Sanity checks
SCREAMING_SNAKE_CASE__ ={}
SCREAMING_SNAKE_CASE__ =None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
SCREAMING_SNAKE_CASE__ =args.train_file
SCREAMING_SNAKE_CASE__ =args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
SCREAMING_SNAKE_CASE__ =args.eval_file
for key in data_files:
SCREAMING_SNAKE_CASE__ =data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
SCREAMING_SNAKE_CASE__ =extension
else:
assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
SCREAMING_SNAKE_CASE__ =f"""{args.output_dir}/self-train_iter-{{}}""".format
SCREAMING_SNAKE_CASE__ =data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=__UpperCamelCase )
os.makedirs(__UpperCamelCase, exist_ok=__UpperCamelCase )
accelerator.wait_for_everyone()
SCREAMING_SNAKE_CASE__ =None
SCREAMING_SNAKE_CASE__ =None
SCREAMING_SNAKE_CASE__ =0
SCREAMING_SNAKE_CASE__ =False
# Show the progress bar
SCREAMING_SNAKE_CASE__ =tqdm(range(args.max_selftrain_iterations ), disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0, int(args.max_selftrain_iterations ) ):
SCREAMING_SNAKE_CASE__ =data_dir_format(__UpperCamelCase )
assert os.path.exists(__UpperCamelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
SCREAMING_SNAKE_CASE__ =os.path.join(__UpperCamelCase, """stage-1""" )
SCREAMING_SNAKE_CASE__ ={
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(__UpperCamelCase, __UpperCamelCase ):
arguments_dict.update({key: value} )
SCREAMING_SNAKE_CASE__ =os.path.join(__UpperCamelCase, """best-checkpoint""", __UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""", __UpperCamelCase, __UpperCamelCase, )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""", __UpperCamelCase )
finetune(**__UpperCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__UpperCamelCase )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""", __UpperCamelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
SCREAMING_SNAKE_CASE__ =os.path.join(__UpperCamelCase, """best-checkpoint""" )
SCREAMING_SNAKE_CASE__ =os.path.join(__UpperCamelCase, """stage-2""" )
# Update arguments_dict
SCREAMING_SNAKE_CASE__ =model_path
SCREAMING_SNAKE_CASE__ =data_files["""train"""]
SCREAMING_SNAKE_CASE__ =current_output_dir
SCREAMING_SNAKE_CASE__ =os.path.join(__UpperCamelCase, """best-checkpoint""", __UpperCamelCase )
if os.path.exists(__UpperCamelCase ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""", __UpperCamelCase, __UpperCamelCase, )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""", __UpperCamelCase )
finetune(**__UpperCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(__UpperCamelCase )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""", __UpperCamelCase )
SCREAMING_SNAKE_CASE__ =iteration
SCREAMING_SNAKE_CASE__ =data_dir_format(iteration + 1 )
SCREAMING_SNAKE_CASE__ =AutoConfig.from_pretrained(os.path.join(__UpperCamelCase, """best-checkpoint""" ) )
SCREAMING_SNAKE_CASE__ =config.idalabel
SCREAMING_SNAKE_CASE__ =os.path.join(__UpperCamelCase, """eval_results_best-checkpoint.json""" )
SCREAMING_SNAKE_CASE__ =os.path.join(__UpperCamelCase, """test_results_best-checkpoint.json""" )
assert os.path.exists(__UpperCamelCase )
with open(__UpperCamelCase, """r""" ) as f:
SCREAMING_SNAKE_CASE__ =float(json.load(__UpperCamelCase )[args.eval_metric] )
SCREAMING_SNAKE_CASE__ =os.path.join(__UpperCamelCase, """infer_output_best-checkpoint.csv""" )
assert os.path.exists(__UpperCamelCase )
# Loading the dataset from local csv or json files.
SCREAMING_SNAKE_CASE__ =load_dataset(args.data_file_extension, data_files={"""data""": data_files["""infer"""]} )["""data"""]
SCREAMING_SNAKE_CASE__ =load_dataset("""csv""", data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(__UpperCamelCase, exist_ok=__UpperCamelCase )
shutil.copy(__UpperCamelCase, os.path.join(__UpperCamelCase, f"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(__UpperCamelCase ):
shutil.copy(__UpperCamelCase, os.path.join(__UpperCamelCase, f"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
accelerator.wait_for_everyone()
SCREAMING_SNAKE_CASE__ =os.path.join(__UpperCamelCase, f"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
SCREAMING_SNAKE_CASE__ =eval_result
if best_iteration is None:
SCREAMING_SNAKE_CASE__ =new_iteration
SCREAMING_SNAKE_CASE__ =new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
SCREAMING_SNAKE_CASE__ =new_iteration
SCREAMING_SNAKE_CASE__ =new_eval_result
SCREAMING_SNAKE_CASE__ =0
else:
if new_eval_result == best_eval_result:
SCREAMING_SNAKE_CASE__ =new_iteration
SCREAMING_SNAKE_CASE__ =new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
SCREAMING_SNAKE_CASE__ =True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""", __UpperCamelCase )
logger.info("""Best evaluation result: %s = %f""", args.eval_metric, __UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__UpperCamelCase, f"""eval_results_iter-{iteration}.json""" ), os.path.join(__UpperCamelCase, """eval_results_best-iteration.json""" ), )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""", args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""", args.eval_metric, __UpperCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(__UpperCamelCase, f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ), os.path.join(__UpperCamelCase, """eval_results_best-iteration.json""" ), )
| 151 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
lowerCamelCase_ = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
lowerCamelCase_ = [0, 25, 50]
lowerCamelCase_ = [25, 50, 75]
lowerCamelCase_ = fuzz.membership.trimf(X, abca)
lowerCamelCase_ = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
lowerCamelCase_ = np.ones(75)
lowerCamelCase_ = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
lowerCamelCase_ = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
lowerCamelCase_ = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
lowerCamelCase_ = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
lowerCamelCase_ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
lowerCamelCase_ = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
lowerCamelCase_ = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
lowerCamelCase_ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
lowerCamelCase_ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("Young")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("Middle aged")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("union")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("intersection")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("complement_a")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("difference a/b")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("alg_sum")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("alg_product")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("bdd_sum")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("bdd_difference")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 151 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_=13 , UpperCAmelCase_=3 , UpperCAmelCase_=True , UpperCAmelCase_=True , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=2_24 , UpperCAmelCase_=10_00 , UpperCAmelCase_=[3, 3, 6, 4] , UpperCAmelCase_=[48, 56, 1_12, 2_20] , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = num_labels
snake_case_ = image_size
snake_case_ = layer_depths
snake_case_ = embed_dims
def _lowercase ( self ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def _lowercase ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=UpperCAmelCase_ , layer_scale_init_value=1e-5 , )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = SwiftFormerModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _lowercase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = self.num_labels
snake_case_ = SwiftFormerForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
snake_case_ = SwiftFormerForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self ):
((snake_case_) , (snake_case_) , (snake_case_)) = self.prepare_config_and_inputs()
snake_case_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
snake_case = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
snake_case = (
{"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
snake_case = False
snake_case = False
snake_case = False
snake_case = False
snake_case = False
def _lowercase ( self ):
snake_case_ = SwiftFormerModelTester(self )
snake_case_ = ConfigTester(
self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _lowercase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def _lowercase ( self ):
pass
def _lowercase ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) )
def _lowercase ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def _lowercase ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def _lowercase ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = SwiftFormerModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def _lowercase ( self ):
pass
def _lowercase ( self ):
def check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
snake_case_ = model_class(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
with torch.no_grad():
snake_case_ = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) )
snake_case_ = outputs.hidden_states
snake_case_ = 8
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(UpperCAmelCase_ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def _lowercase ( self ):
def _config_zero_init(UpperCAmelCase_ ):
snake_case_ = copy.deepcopy(UpperCAmelCase_ )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(UpperCAmelCase_ , UpperCAmelCase_ , 1e-1_0 )
if isinstance(getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ ):
snake_case_ = _config_zero_init(getattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return configs_no_init
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = _config_zero_init(UpperCAmelCase_ )
for model_class in self.all_model_classes:
snake_case_ = model_class(config=UpperCAmelCase_ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self ):
pass
def __snake_case ( ):
snake_case_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def _lowercase ( self ):
snake_case_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(UpperCAmelCase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=UpperCAmelCase_ , return_tensors="pt" ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )
# verify the logits
snake_case_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) )
| 705 |
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def __snake_case ( lowercase : Sequence[float] , lowercase : int , lowercase : int ):
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
snake_case_ = (low + high) // 2
snake_case_ , snake_case_ , snake_case_ = max_subarray(lowercase , lowercase , lowercase )
snake_case_ , snake_case_ , snake_case_ = max_subarray(lowercase , mid + 1 , lowercase )
snake_case_ , snake_case_ , snake_case_ = max_cross_sum(lowercase , lowercase , lowercase , lowercase )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def __snake_case ( lowercase : Sequence[float] , lowercase : int , lowercase : int , lowercase : int ):
snake_case_ , snake_case_ = float("-inf" ), -1
snake_case_ , snake_case_ = float("-inf" ), -1
snake_case_ = 0
for i in range(lowercase , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
snake_case_ = summ
snake_case_ = i
snake_case_ = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
snake_case_ = summ
snake_case_ = i
return max_left, max_right, (left_sum + right_sum)
def __snake_case ( lowercase : int ):
snake_case_ = [randint(1 , lowercase ) for _ in range(lowercase )]
snake_case_ = time.time()
max_subarray(lowercase , 0 , input_size - 1 )
snake_case_ = time.time()
return end - start
def __snake_case ( ):
snake_case_ = [10, 100, 1_000, 10_000, 50_000, 100_000, 200_000, 300_000, 400_000, 500_000]
snake_case_ = [time_max_subarray(lowercase ) for input_size in input_sizes]
print("No of Inputs\t\tTime Taken" )
for input_size, runtime in zip(lowercase , lowercase ):
print(lowercase , "\t\t" , lowercase )
plt.plot(lowercase , lowercase )
plt.xlabel("Number of Inputs" )
plt.ylabel("Time taken in seconds" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 420 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ : int = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : List[Any] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
__magic_name__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 672 |
'''simple docstring'''
import baseaa
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return baseaa.aaaencode(string.encode("utf-8" ) )
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return baseaa.aaadecode(SCREAMING_SNAKE_CASE__ ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 672 | 1 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _UpperCamelCase ( ):
__SCREAMING_SNAKE_CASE : Any = HfArgumentParser(lowercase__ )
__SCREAMING_SNAKE_CASE : int = parser.parse_args_into_dataclasses()[0]
__SCREAMING_SNAKE_CASE : Tuple = TensorFlowBenchmark(args=lowercase__ )
try:
__SCREAMING_SNAKE_CASE : str = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__SCREAMING_SNAKE_CASE : int = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
__SCREAMING_SNAKE_CASE : Optional[int] = ''' '''.join(str(lowercase__ ).split(''' ''' )[:-1] )
__SCREAMING_SNAKE_CASE : str = ''''''
__SCREAMING_SNAKE_CASE : List[str] = eval(str(lowercase__ ).split(''' ''' )[-1] )
__SCREAMING_SNAKE_CASE : Dict = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowercase__ )
if len(lowercase__ ) > 0:
__SCREAMING_SNAKE_CASE : Optional[Any] = full_error_msg + begin_error_msg + str(lowercase__ )
raise ValueError(lowercase__ )
benchmark.run()
if __name__ == "__main__":
main()
| 260 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCAmelCase : Dict =logging.get_logger(__name__)
class _lowercase ( A__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ['''pixel_values''']
def __init__( self :str , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Dict[str, int]] = None , lowerCAmelCase__ :PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Union[int, float] = 1 / 255 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , **lowerCAmelCase__ :Tuple , ) -> None:
super().__init__(**lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = size if size is not None else {'''shortest_edge''': 256}
__SCREAMING_SNAKE_CASE : Optional[int] = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : str = do_resize
__SCREAMING_SNAKE_CASE : str = size
__SCREAMING_SNAKE_CASE : Any = resample
__SCREAMING_SNAKE_CASE : Union[str, Any] = do_center_crop
__SCREAMING_SNAKE_CASE : Tuple = crop_size
__SCREAMING_SNAKE_CASE : List[str] = do_rescale
__SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
__SCREAMING_SNAKE_CASE : Any = do_normalize
__SCREAMING_SNAKE_CASE : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__SCREAMING_SNAKE_CASE : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __magic_name__( self :Dict , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :int , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE : Tuple = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
__SCREAMING_SNAKE_CASE : Optional[int] = get_resize_output_image_size(lowerCAmelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCAmelCase__ )
return resize(lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :str , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Dict[str, int] , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :Any , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE : str = get_size_dict(lowerCAmelCase__ )
return center_crop(lowerCAmelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :float , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :str ) -> np.ndarray:
return rescale(lowerCAmelCase__ , scale=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :Dict , lowerCAmelCase__ :np.ndarray , lowerCAmelCase__ :Union[float, List[float]] , lowerCAmelCase__ :Union[float, List[float]] , lowerCAmelCase__ :Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase__ :List[Any] , ) -> np.ndarray:
return normalize(lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ , data_format=lowerCAmelCase__ , **lowerCAmelCase__ )
def __magic_name__( self :Optional[int] , lowerCAmelCase__ :ImageInput , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :PILImageResampling = None , lowerCAmelCase__ :bool = None , lowerCAmelCase__ :Dict[str, int] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[float] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[float, List[float]]] = None , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , lowerCAmelCase__ :Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : Dict = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE : Tuple = size if size is not None else self.size
__SCREAMING_SNAKE_CASE : Dict = get_size_dict(lowerCAmelCase__ , default_to_square=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__SCREAMING_SNAKE_CASE : Optional[Any] = crop_size if crop_size is not None else self.crop_size
__SCREAMING_SNAKE_CASE : str = get_size_dict(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE : Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE : Tuple = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE : int = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE : int = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE : int = make_list_of_images(lowerCAmelCase__ )
if not valid_images(lowerCAmelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE : Optional[Any] = [to_numpy_array(lowerCAmelCase__ ) for image in images]
if do_resize:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.resize(image=lowerCAmelCase__ , size=lowerCAmelCase__ , resample=lowerCAmelCase__ ) for image in images]
if do_center_crop:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.center_crop(image=lowerCAmelCase__ , size=lowerCAmelCase__ ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [self.rescale(image=lowerCAmelCase__ , scale=lowerCAmelCase__ ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE : int = [self.normalize(image=lowerCAmelCase__ , mean=lowerCAmelCase__ , std=lowerCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE : Tuple = [to_channel_dimension_format(lowerCAmelCase__ , lowerCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase__ , tensor_type=lowerCAmelCase__ )
| 260 | 1 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
SCREAMING_SNAKE_CASE : List[Any] = datasets.utils.logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Dict = ["names", "prefix"]
SCREAMING_SNAKE_CASE : Dict = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
SCREAMING_SNAKE_CASE : List[Any] = ["encoding_errors", "on_bad_lines"]
SCREAMING_SNAKE_CASE : List[Any] = ["date_format"]
@dataclass
class UpperCamelCase ( datasets.BuilderConfig ):
'''simple docstring'''
lowercase : str =","
lowercase : Optional[str] =None
lowercase : Optional[Union[int, List[int], str]] ="infer"
lowercase : Optional[List[str]] =None
lowercase : Optional[List[str]] =None
lowercase : Optional[Union[int, str, List[int], List[str]]] =None
lowercase : Optional[Union[List[int], List[str]]] =None
lowercase : Optional[str] =None
lowercase : bool =True
lowercase : Optional[Literal["c", "python", "pyarrow"]] =None
lowercase : Dict[Union[int, str], Callable[[Any], Any]] =None
lowercase : Optional[list] =None
lowercase : Optional[list] =None
lowercase : bool =False
lowercase : Optional[Union[int, List[int]]] =None
lowercase : Optional[int] =None
lowercase : Optional[Union[str, List[str]]] =None
lowercase : bool =True
lowercase : bool =True
lowercase : bool =False
lowercase : bool =True
lowercase : Optional[str] =None
lowercase : str ="."
lowercase : Optional[str] =None
lowercase : str ='"'
lowercase : int =0
lowercase : Optional[str] =None
lowercase : Optional[str] =None
lowercase : Optional[str] =None
lowercase : Optional[str] =None
lowercase : bool =True
lowercase : bool =True
lowercase : int =0
lowercase : bool =True
lowercase : bool =False
lowercase : Optional[str] =None
lowercase : int =10000
lowercase : Optional[datasets.Features] =None
lowercase : Optional[str] ="strict"
lowercase : Literal["error", "warn", "skip"] ="error"
lowercase : Optional[str] =None
def UpperCamelCase ( self ):
if self.delimiter is not None:
lowercase_ :Any = self.delimiter
if self.column_names is not None:
lowercase_ :Any = self.column_names
@property
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , UpperCamelCase_ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class UpperCamelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
lowercase : Any =CsvConfig
def UpperCamelCase ( self ):
return datasets.DatasetInfo(features=self.config.features )
def UpperCamelCase ( self , UpperCamelCase_ ):
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
lowercase_ :Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCamelCase_ , (str, list, tuple) ):
lowercase_ :Optional[int] = data_files
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Any = [files]
lowercase_ :List[str] = [dl_manager.iter_files(UpperCamelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowercase_ :str = []
for split_name, files in data_files.items():
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :List[Any] = [files]
lowercase_ :Any = [dl_manager.iter_files(UpperCamelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCamelCase_ , gen_kwargs={'''files''': files} ) )
return splits
def UpperCamelCase ( self , UpperCamelCase_ ):
if self.config.features is not None:
lowercase_ :Optional[int] = self.config.features.arrow_schema
if all(not require_storage_cast(UpperCamelCase_ ) for feature in self.config.features.values() ):
# cheaper cast
lowercase_ :Optional[Any] = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=UpperCamelCase_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
lowercase_ :Any = table_cast(UpperCamelCase_ , UpperCamelCase_ )
return pa_table
def UpperCamelCase ( self , UpperCamelCase_ ):
lowercase_ :Optional[int] = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
lowercase_ :Tuple = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(UpperCamelCase_ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCamelCase_ ) ):
lowercase_ :str = pd.read_csv(UpperCamelCase_ , iterator=UpperCamelCase_ , dtype=UpperCamelCase_ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(UpperCamelCase_ ):
lowercase_ :Union[str, Any] = pa.Table.from_pandas(UpperCamelCase_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCamelCase_ )
except ValueError as e:
logger.error(f"Failed to read file '{file}' with error {type(UpperCamelCase_ )}: {e}" )
raise
| 257 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( _a ) -> List[str]:
'''simple docstring'''
lowercase_ :Union[str, Any] = tmp_path / '''file.csv'''
lowercase_ :List[str] = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(_a , '''w''' ) as f:
f.write(_a )
return str(_a )
@pytest.fixture
def UpperCamelCase ( _a ) -> Optional[Any]:
'''simple docstring'''
lowercase_ :List[Any] = tmp_path / '''malformed_file.csv'''
lowercase_ :List[str] = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(_a , '''w''' ) as f:
f.write(_a )
return str(_a )
@pytest.fixture
def UpperCamelCase ( _a , _a ) -> Optional[Any]:
'''simple docstring'''
lowercase_ :Dict = tmp_path / '''csv_with_image.csv'''
lowercase_ :int = textwrap.dedent(
f"\\n image\n {image_file}\n " )
with open(_a , '''w''' ) as f:
f.write(_a )
return str(_a )
@pytest.fixture
def UpperCamelCase ( _a ) -> str:
'''simple docstring'''
lowercase_ :int = tmp_path / '''csv_with_label.csv'''
lowercase_ :int = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(_a , '''w''' ) as f:
f.write(_a )
return str(_a )
@pytest.fixture
def UpperCamelCase ( _a ) -> str:
'''simple docstring'''
lowercase_ :Dict = tmp_path / '''csv_with_int_list.csv'''
lowercase_ :Any = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(_a , '''w''' ) as f:
f.write(_a )
return str(_a )
def UpperCamelCase ( _a , _a , _a ) -> Any:
'''simple docstring'''
lowercase_ :Dict = Csv()
lowercase_ :Tuple = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_a , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(_a ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( _a ) -> Tuple:
'''simple docstring'''
with open(_a , encoding='''utf-8''' ) as f:
lowercase_ :Any = f.read().splitlines()[1]
lowercase_ :List[Any] = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
lowercase_ :List[Any] = csv._generate_tables([[csv_file_with_image]] )
lowercase_ :Dict = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
lowercase_ :Optional[int] = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( _a ) -> int:
'''simple docstring'''
with open(_a , encoding='''utf-8''' ) as f:
lowercase_ :List[Any] = f.read().splitlines()[1:]
lowercase_ :Optional[Any] = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
lowercase_ :List[Any] = csv._generate_tables([[csv_file_with_label]] )
lowercase_ :Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
lowercase_ :int = pa_table.to_pydict()['''label''']
assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).straint(_a ) for label in labels]
def UpperCamelCase ( _a ) -> Union[str, Any]:
'''simple docstring'''
lowercase_ :int = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda _a : [int(_a ) for i in x.split()]} )
lowercase_ :Any = csv._generate_tables([[csv_file_with_int_list]] )
lowercase_ :Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
lowercase_ :List[str] = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 257 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowerCAmelCase : Union[str, Any] = object()
# For specifying empty leaf dict `{}`
lowerCAmelCase : Any = object()
def lowercase (_A , _A ):
"""simple docstring"""
_lowerCAmelCase : str = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(_A ) - len(_A ) + 1 ):
_lowerCAmelCase : Dict = [x.match(_A ) for x, y in zip(_A , ks[i:] )]
if matches and all(_A ):
return True
return False
def lowercase (_A ):
"""simple docstring"""
def replace(_A , _A ):
for rule, replacement in rules:
if _match(_A , _A ):
return replacement
return val
return replace
def lowercase ():
"""simple docstring"""
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , _A )),
(("transformer", "wte", "embedding"), P('mp' , _A )),
# atention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(_A , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , _A )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(_A , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , _A )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Any = _get_partition_rules()
_lowerCAmelCase : Tuple = _replacement_rules(_A )
_lowerCAmelCase : Tuple = {k: _unmatched for k in flatten_dict(_A )}
_lowerCAmelCase : List[Any] = {k: replace(_A , _A ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(_A ) )
| 630 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCAmelCase : int = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 630 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class lowercase_ :
"""simple docstring"""
snake_case_ = LEDConfig
snake_case_ = {}
snake_case_ = '''gelu'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=20 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=4 , ):
"""simple docstring"""
a_ = parent
a_ = batch_size
a_ = seq_length
a_ = is_training
a_ = use_labels
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = eos_token_id
a_ = pad_token_id
a_ = bos_token_id
a_ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
a_ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
a_ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def lowercase__ ( self ):
"""simple docstring"""
a_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a_ = tf.concat([input_ids, eos_tensor] , axis=1 )
a_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
a_ = prepare_led_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
a_ = tf.concat(
[tf.zeros_like(_UpperCAmelCase )[:, :-1], tf.ones_like(_UpperCAmelCase )[:, -1:]] , axis=-1 , )
a_ = global_attention_mask
return config, inputs_dict
def lowercase__ ( self , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
a_ = TFLEDModel(config=_UpperCAmelCase ).get_decoder()
a_ = inputs_dict["""input_ids"""]
a_ = input_ids[:1, :]
a_ = inputs_dict["""attention_mask"""][:1, :]
a_ = 1
# first forward pass
a_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase )
a_ , a_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
a_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
a_ = tf.concat([input_ids, next_tokens] , axis=-1 )
a_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
a_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a_ = output_from_no_past[:, -3:, random_slice_idx]
a_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 )
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , UpperCAmelCase__=None , ):
"""simple docstring"""
if attention_mask is None:
a_ = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
a_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
a_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class lowercase_ ( UpperCamelCase__ ,UpperCamelCase__ ,unittest.TestCase):
"""simple docstring"""
snake_case_ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
snake_case_ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
snake_case_ = (
{
'''conversational''': TFLEDForConditionalGeneration,
'''feature-extraction''': TFLEDModel,
'''summarization''': TFLEDForConditionalGeneration,
'''text2text-generation''': TFLEDForConditionalGeneration,
'''translation''': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
def lowercase__ ( self ):
"""simple docstring"""
a_ = TFLEDModelTester(self )
a_ = ConfigTester(self , config_class=_UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ):
"""simple docstring"""
a_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase )
def lowercase__ ( self ):
"""simple docstring"""
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = tf.zeros_like(inputs_dict["""attention_mask"""] )
a_ = 2
a_ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
a_ = True
a_ = self.model_tester.seq_length
a_ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_UpperCAmelCase ):
a_ = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_UpperCAmelCase ):
a_ = [t.numpy() for t in outputs.encoder_attentions]
a_ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
a_ = True
a_ = False
a_ = False
a_ = model_class(_UpperCAmelCase )
a_ = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
a_ = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
a_ = model_class(_UpperCAmelCase )
a_ = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a_ = True
a_ = model_class(_UpperCAmelCase )
a_ = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
a_ = True
a_ = True
a_ = model_class(_UpperCAmelCase )
a_ = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowercase__ ( self ):
"""simple docstring"""
pass
def lowerCamelCase_ ( UpperCAmelCase__ ):
"""simple docstring"""
return tf.constant(UpperCAmelCase__ , dtype=tf.intaa )
A_ : Optional[int] =1e-4
@slow
@require_tf
class lowercase_ ( unittest.TestCase):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
a_ = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led
# change to intended input here
a_ = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
a_ = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
a_ = prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
a_ = model(**_UpperCAmelCase )[0]
a_ = (1, 1_024, 768)
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
a_ = tf.convert_to_tensor(
[[2.3_0_5_0, 2.8_2_7_9, 0.6_5_3_1], [-1.8_4_5_7, -0.1_4_5_5, -3.5_6_6_1], [-1.0_1_8_6, 0.4_5_8_6, -2.2_0_4_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-3 )
def lowercase__ ( self ):
"""simple docstring"""
a_ = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" )
# change to intended input here
a_ = _long_tensor([512 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
a_ = _long_tensor([128 * [0, 31_414, 232, 328, 740, 1_140, 12_695, 69]] )
a_ = prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase )
a_ = model(**_UpperCAmelCase )[0]
a_ = (1, 1_024, model.config.vocab_size)
self.assertEqual(output.shape , _UpperCAmelCase )
# change to expected output here
a_ = tf.convert_to_tensor(
[[3_3.6_5_0_7, 6.4_5_7_2, 1_6.8_0_8_9], [5.8_7_3_9, -2.4_2_3_8, 1_1.2_9_0_2], [-3.2_1_3_9, -4.3_1_4_9, 4.2_7_8_3]] , )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-3 , rtol=1e-3 ) | 483 |
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class lowercase_ :
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=sys.maxsize ):
"""simple docstring"""
a_ = """bilinear"""
a_ = max_size
a_ = short_edge_length
def __call__( self , _UpperCAmelCase ):
"""simple docstring"""
a_ = []
for img in imgs:
a_ , a_ = img.shape[:2]
# later: provide list and randomly choose index for resize
a_ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
a_ = size * 1.0 / min(_UpperCAmelCase , _UpperCAmelCase )
if h < w:
a_ , a_ = size, scale * w
else:
a_ , a_ = scale * h, size
if max(_UpperCAmelCase , _UpperCAmelCase ) > self.max_size:
a_ = self.max_size * 1.0 / max(_UpperCAmelCase , _UpperCAmelCase )
a_ = newh * scale
a_ = neww * scale
a_ = int(neww + 0.5 )
a_ = int(newh + 0.5 )
if img.dtype == np.uinta:
a_ = Image.fromarray(_UpperCAmelCase )
a_ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
a_ = np.asarray(_UpperCAmelCase )
else:
a_ = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
a_ = nn.functional.interpolate(
_UpperCAmelCase , (newh, neww) , mode=self.interp_method , align_corners=_UpperCAmelCase ).squeeze(0 )
img_augs.append(_UpperCAmelCase )
return img_augs
class lowercase_ :
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
"""simple docstring"""
a_ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
a_ = cfg.INPUT.FORMAT
a_ = cfg.SIZE_DIVISIBILITY
a_ = cfg.PAD_VALUE
a_ = cfg.INPUT.MAX_SIZE_TEST
a_ = cfg.MODEL.DEVICE
a_ = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
a_ = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
a_ = lambda _UpperCAmelCase : (x - self.pixel_mean) / self.pixel_std
def lowercase__ ( self , _UpperCAmelCase ):
"""simple docstring"""
a_ = tuple(max(_UpperCAmelCase ) for s in zip(*[img.shape for img in images] ) )
a_ = [im.shape[-2:] for im in images]
a_ = [
nn.functional.pad(
_UpperCAmelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(_UpperCAmelCase , _UpperCAmelCase )
]
return torch.stack(_UpperCAmelCase ), torch.tensor(_UpperCAmelCase )
def __call__( self , _UpperCAmelCase , _UpperCAmelCase=False ):
"""simple docstring"""
with torch.no_grad():
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = [images]
if single_image:
assert len(_UpperCAmelCase ) == 1
for i in range(len(_UpperCAmelCase ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(_UpperCAmelCase , images.pop(_UpperCAmelCase ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
_UpperCAmelCase , torch.as_tensor(img_tensorize(images.pop(_UpperCAmelCase ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
a_ = torch.tensor([im.shape[:2] for im in images] )
a_ = self.aug(_UpperCAmelCase )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
a_ = [self.normalizer(_UpperCAmelCase ) for x in images]
# now pad them to do the following operations
a_ , a_ = self.pad(_UpperCAmelCase )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
a_ = torch.true_divide(_UpperCAmelCase , _UpperCAmelCase )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowerCamelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
assert torch.isfinite(UpperCAmelCase__ ).all(), "Box tensor contains infinite or NaN!"
a_ , a_ = box_size
tensor[:, 0].clamp_(min=0 , max=UpperCAmelCase__ )
tensor[:, 1].clamp_(min=0 , max=UpperCAmelCase__ )
tensor[:, 2].clamp_(min=0 , max=UpperCAmelCase__ )
tensor[:, 3].clamp_(min=0 , max=UpperCAmelCase__ ) | 483 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase__ :
def __init__( self : List[Any],__A : Any,__A : Optional[int]=1_3,__A : str=7,__A : Any=True,__A : Optional[Any]=True,__A : Optional[int]=True,__A : Optional[Any]=True,__A : List[Any]=9_9,__A : str=[1, 1, 2],__A : Tuple=1,__A : Tuple=3_2,__A : str=4,__A : Dict=8,__A : Any=3_7,__A : Dict="gelu_new",__A : Optional[int]=0.1,__A : int=0.1,__A : str=0.0,__A : Tuple=5_1_2,__A : List[Any]=3,__A : int=0.02,__A : List[str]=3,__A : Union[str, Any]=4,__A : Optional[int]=None,__A : Optional[Any]=False,):
_lowerCamelCase : List[str] = parent
_lowerCamelCase : Union[str, Any] = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : int = is_training
_lowerCamelCase : int = use_input_mask
_lowerCamelCase : Optional[Any] = use_token_type_ids
_lowerCamelCase : Dict = use_labels
_lowerCamelCase : Tuple = vocab_size
_lowerCamelCase : Optional[int] = block_sizes
_lowerCamelCase : Optional[Any] = num_decoder_layers
_lowerCamelCase : Union[str, Any] = d_model
_lowerCamelCase : Tuple = n_head
_lowerCamelCase : Optional[Any] = d_head
_lowerCamelCase : Optional[int] = d_inner
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Dict = hidden_dropout
_lowerCamelCase : List[str] = attention_dropout
_lowerCamelCase : Dict = activation_dropout
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : List[str] = type_vocab_size
_lowerCamelCase : List[str] = 2
_lowerCamelCase : Optional[Any] = num_labels
_lowerCamelCase : Tuple = num_choices
_lowerCamelCase : Dict = scope
_lowerCamelCase : int = initializer_std
# Used in the tests to check the size of the first attention layer
_lowerCamelCase : Any = n_head
# Used in the tests to check the size of the first hidden state
_lowerCamelCase : List[str] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
_lowerCamelCase : Tuple = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
_lowerCamelCase : Any = self.num_hidden_layers + 2
def lowerCamelCase_ ( self : Union[str, Any] ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
_lowerCamelCase : Dict = None
if self.use_input_mask:
_lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase : int = None
if self.use_token_type_ids:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
_lowerCamelCase : int = None
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : int = None
if self.use_labels:
_lowerCamelCase : str = ids_tensor([self.batch_size],self.type_sequence_label_size )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
_lowerCamelCase : Tuple = ids_tensor([self.batch_size],self.num_choices )
_lowerCamelCase : str = FunnelConfig(
vocab_size=self.vocab_size,block_sizes=self.block_sizes,num_decoder_layers=self.num_decoder_layers,d_model=self.d_model,n_head=self.n_head,d_head=self.d_head,d_inner=self.d_inner,hidden_act=self.hidden_act,hidden_dropout=self.hidden_dropout,attention_dropout=self.attention_dropout,activation_dropout=self.activation_dropout,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,initializer_std=self.initializer_std,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase_ ( self : List[str],__A : Union[str, Any],__A : Any,__A : Tuple,__A : List[str],__A : List[str],__A : Optional[Any],__A : Optional[Any],):
_lowerCamelCase : Optional[int] = TFFunnelModel(config=UpperCAmelCase__ )
_lowerCamelCase : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Union[str, Any] = model(UpperCAmelCase__ )
_lowerCamelCase : Tuple = [input_ids, input_mask]
_lowerCamelCase : Dict = model(UpperCAmelCase__ )
_lowerCamelCase : List[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
_lowerCamelCase : str = False
_lowerCamelCase : Union[str, Any] = TFFunnelModel(config=UpperCAmelCase__ )
_lowerCamelCase : Optional[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
_lowerCamelCase : List[Any] = False
_lowerCamelCase : Dict = TFFunnelModel(config=UpperCAmelCase__ )
_lowerCamelCase : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase_ ( self : Optional[Any],__A : List[str],__A : List[str],__A : List[Any],__A : Optional[int],__A : Union[str, Any],__A : Optional[Any],__A : str,):
_lowerCamelCase : int = TFFunnelBaseModel(config=UpperCAmelCase__ )
_lowerCamelCase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Optional[int] = model(UpperCAmelCase__ )
_lowerCamelCase : Optional[int] = [input_ids, input_mask]
_lowerCamelCase : List[str] = model(UpperCAmelCase__ )
_lowerCamelCase : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 2, self.d_model) )
_lowerCamelCase : List[str] = False
_lowerCamelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCAmelCase__ )
_lowerCamelCase : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 3, self.d_model) )
_lowerCamelCase : Any = False
_lowerCamelCase : int = TFFunnelBaseModel(config=UpperCAmelCase__ )
_lowerCamelCase : int = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, 2, self.d_model) )
def lowerCamelCase_ ( self : List[str],__A : Any,__A : Union[str, Any],__A : Any,__A : Dict,__A : List[str],__A : int,__A : Dict,):
_lowerCamelCase : Dict = TFFunnelForPreTraining(config=UpperCAmelCase__ )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : List[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Union[str, Any],__A : int,__A : int,__A : Dict,__A : List[str],__A : List[str],__A : Optional[int],__A : int,):
_lowerCamelCase : List[str] = TFFunnelForMaskedLM(config=UpperCAmelCase__ )
_lowerCamelCase : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Tuple = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Tuple,__A : Dict,__A : Union[str, Any],__A : List[str],__A : List[str],__A : Union[str, Any],__A : str,__A : Any,):
_lowerCamelCase : Any = self.num_labels
_lowerCamelCase : Dict = TFFunnelForSequenceClassification(config=UpperCAmelCase__ )
_lowerCamelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : List[str] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : int,__A : Union[str, Any],__A : Union[str, Any],__A : Any,__A : int,__A : List[str],__A : List[Any],__A : str,):
_lowerCamelCase : Dict = self.num_choices
_lowerCamelCase : Tuple = TFFunnelForMultipleChoice(config=UpperCAmelCase__ )
_lowerCamelCase : str = tf.tile(tf.expand_dims(UpperCAmelCase__,1 ),(1, self.num_choices, 1) )
_lowerCamelCase : List[Any] = tf.tile(tf.expand_dims(UpperCAmelCase__,1 ),(1, self.num_choices, 1) )
_lowerCamelCase : Dict = tf.tile(tf.expand_dims(UpperCAmelCase__,1 ),(1, self.num_choices, 1) )
_lowerCamelCase : Union[str, Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
_lowerCamelCase : Dict = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : int,__A : Optional[int],__A : List[str],__A : Optional[Any],__A : str,__A : int,__A : Any,__A : Optional[Any],):
_lowerCamelCase : str = self.num_labels
_lowerCamelCase : str = TFFunnelForTokenClassification(config=UpperCAmelCase__ )
_lowerCamelCase : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : Dict = model(UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : Dict,__A : List[Any],__A : List[Any],__A : Union[str, Any],__A : Optional[Any],__A : Dict,__A : List[str],__A : Optional[int],):
_lowerCamelCase : Optional[int] = TFFunnelForQuestionAnswering(config=UpperCAmelCase__ )
_lowerCamelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
_lowerCamelCase : List[Any] = model(UpperCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Optional[Any] ):
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
(
_lowerCamelCase
) : List[Any] = config_and_inputs
_lowerCamelCase : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A , A , unittest.TestCase ):
lowerCAmelCase_ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Optional[int] = TFFunnelModelTester(self )
_lowerCamelCase : Union[str, Any] = ConfigTester(self,config_class=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
_lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ )
@require_tf
class UpperCAmelCase__ ( A , unittest.TestCase ):
lowerCAmelCase_ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def lowerCamelCase_ ( self : List[Any] ):
_lowerCamelCase : Tuple = TFFunnelModelTester(self,base=UpperCAmelCase__ )
_lowerCamelCase : Any = ConfigTester(self,config_class=UpperCAmelCase__ )
def lowerCamelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Tuple ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase__ )
def lowerCamelCase_ ( self : str ):
_lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase__ ) | 717 |
'''simple docstring'''
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[int] = {'vocab_file': 'spiece.model'}
UpperCAmelCase_ : Any = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
UpperCAmelCase_ : str = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class UpperCAmelCase__ ( A ):
lowerCAmelCase_ = VOCAB_FILES_NAMES
lowerCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ = ['input_ids', 'attention_mask']
def __init__( self : Dict,__A : List[str],__A : Any=False,__A : Tuple=False,__A : Dict=False,__A : str=None,__A : List[str]=None,__A : Any=None,__A : str=None,__A : Optional[Dict[str, Any]] = None,**__A : str,):
_lowerCamelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCamelCase : int = kwargs.get("name_or_path" )
if name_or_path is None:
logger.warning(
"name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
" you are testing the model, this can safely be ignored" )
_lowerCamelCase : Union[str, Any] = "None"
# Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
_lowerCamelCase : Tuple = "<|endoftext|>" if eos_token is None else eos_token
_lowerCamelCase : List[str] = "<unk>" if unk_token is None else unk_token
if "gpt-sw3-7b" in name_or_path:
_lowerCamelCase : Union[str, Any] = unk_token if pad_token is None else pad_token
_lowerCamelCase : str = eos_token if bos_token is None else bos_token
else:
_lowerCamelCase : List[str] = "<pad>" if pad_token is None else pad_token
_lowerCamelCase : str = "<s>" if bos_token is None else bos_token
super().__init__(
do_lower_case=__A,remove_space=__A,keep_accents=__A,bos_token=__A,eos_token=__A,unk_token=__A,pad_token=__A,sp_model_kwargs=self.sp_model_kwargs,**__A,)
_lowerCamelCase : Union[str, Any] = do_lower_case
_lowerCamelCase : List[Any] = remove_space
_lowerCamelCase : str = keep_accents
_lowerCamelCase : List[Any] = vocab_file
_lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
# Used for whitespace normalization in input texts
# fmt : off
_lowerCamelCase : Union[str, Any] = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
# fmt : on
# Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
_lowerCamelCase : int = re.compile(
f'[{"".join(map(__A,list(range(0,9 ) ) + list(range(1_1,3_2 ) ) + list(range(1_2_7,1_6_0 ) ) + [1_6_0, 1_7_3, 8_2_0_3] ) )}]' )
def __getstate__( self : Dict ):
_lowerCamelCase : int = self.__dict__.copy()
_lowerCamelCase : Optional[Any] = None
return state
def __setstate__( self : Tuple,__A : int ):
_lowerCamelCase : Optional[int] = d
# for backward compatibility
if not hasattr(self,"sp_model_kwargs" ):
_lowerCamelCase : List[str] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
@property
# Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
def lowerCamelCase_ ( self : int ):
return len(self.sp_model )
def lowerCamelCase_ ( self : Dict,__A : str ):
_lowerCamelCase : Union[str, Any] = self.non_printing_characters_re.sub("",__A )
# Normalize whitespaces
_lowerCamelCase : Optional[Any] = "".join([char if char not in self.whitespaces else " " for char in text] )
# NFC Unicode normalization
_lowerCamelCase : Optional[Any] = unicodedata.normalize("NFC",__A )
return text
def lowerCamelCase_ ( self : Union[str, Any],__A : str,**__A : Optional[int] ):
_lowerCamelCase : str = self.preprocess_text(__A )
return self.sp_model.encode(__A,out_type=__A )
def lowerCamelCase_ ( self : int,__A : str ):
return self.sp_model.PieceToId(__A )
def lowerCamelCase_ ( self : Optional[int],__A : int ):
return self.sp_model.IdToPiece(__A )
@staticmethod
def lowerCamelCase_ ( __A : str ):
return out_string
def lowerCamelCase_ ( self : str,__A : List[str] ):
_lowerCamelCase : str = []
_lowerCamelCase : List[Any] = ""
_lowerCamelCase : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
# TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Optional[Any] = []
else:
current_sub_tokens.append(__A )
_lowerCamelCase : str = False
out_string += self.sp_model.decode(__A )
return out_string
def lowerCamelCase_ ( self : Any ):
_lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self : Optional[Any],__A : str,__A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCamelCase : List[Any] = os.path.join(
__A,(filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__A )
elif not os.path.isfile(self.vocab_file ):
with open(__A,"wb" ) as fi:
_lowerCamelCase : str = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def lowerCamelCase_ ( self : Optional[int],__A : Union[str, List[str]],__A : Union[str, bool] = False ):
if isinstance(__A,__A ):
_lowerCamelCase : List[Any] = self.preprocess_text(__A )
_lowerCamelCase : Optional[Any] = self.sp_model.encode(__A )
else:
_lowerCamelCase : List[str] = [self.preprocess_text(__A ) for t in text]
_lowerCamelCase : int = self.sp_model.encode(__A )
if return_tensors is True or return_tensors == "pt":
_lowerCamelCase : str = torch.tensor(__A )
return token_ids
def lowerCamelCase_ ( self : List[Any],__A : Union[int, List[int]] ):
return self.sp_model.decode(__A )
def lowerCamelCase_ ( self : Optional[int],__A : "Conversation" ):
_lowerCamelCase : Any = [f'User: {text}' if is_user else f'Bot: {text}' for is_user, text in conversation.iter_texts()]
_lowerCamelCase : Tuple = (
f'{self.eos_token}{self.bos_token}' + f'{self.bos_token}'.join(__A ) + f'{self.bos_token}Bot:'
)
return self.encode(text=__A ) | 11 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class UpperCamelCase__ :
'''simple docstring'''
lowerCamelCase_ : CommonSchedulerState
# setable values
lowerCamelCase_ : jnp.ndarray
lowerCamelCase_ : jnp.ndarray
lowerCamelCase_ : Optional[int] = None
@classmethod
def _lowercase ( cls , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
return cls(common=UpperCamelCase__ , init_noise_sigma=UpperCamelCase__ , timesteps=UpperCamelCase__ )
@dataclass
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : DDPMSchedulerState
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowerCamelCase_ : Tuple = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCamelCase_ : jnp.dtype
@property
def _lowercase ( self ) -> Union[str, Any]:
return True
@register_to_config
def __init__( self , UpperCamelCase__ = 1000 , UpperCamelCase__ = 0.0001 , UpperCamelCase__ = 0.02 , UpperCamelCase__ = "linear" , UpperCamelCase__ = None , UpperCamelCase__ = "fixed_small" , UpperCamelCase__ = True , UpperCamelCase__ = "epsilon" , UpperCamelCase__ = jnp.floataa , ) -> Tuple:
lowerCamelCase : Optional[int] = dtype
def _lowercase ( self , UpperCamelCase__ = None ) -> DDPMSchedulerState:
if common is None:
lowerCamelCase : Dict = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCamelCase : List[str] = jnp.array(1.0 , dtype=self.dtype )
lowerCamelCase : List[str] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=UpperCamelCase__ , init_noise_sigma=UpperCamelCase__ , timesteps=UpperCamelCase__ , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None ) -> jnp.ndarray:
return sample
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = () ) -> DDPMSchedulerState:
lowerCamelCase : Optional[Any] = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCamelCase : str = (jnp.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=UpperCamelCase__ , timesteps=UpperCamelCase__ , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ) -> Optional[Any]:
lowerCamelCase : Tuple = state.common.alphas_cumprod[t]
lowerCamelCase : List[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCamelCase : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCamelCase : Tuple = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCamelCase : Tuple = jnp.clip(UpperCamelCase__ , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCamelCase : Optional[Any] = jnp.log(jnp.clip(UpperCamelCase__ , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowerCamelCase : Tuple = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCamelCase : List[str] = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCamelCase : Tuple = variance
lowerCamelCase : int = state.common.betas[t]
lowerCamelCase : Dict = (predicted_variance + 1) / 2
lowerCamelCase : int = frac * max_log + (1 - frac) * min_log
return variance
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
lowerCamelCase : List[Any] = timestep
if key is None:
lowerCamelCase : Optional[int] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCamelCase , lowerCamelCase : int = jnp.split(UpperCamelCase__ , sample.shape[1] , axis=1 )
else:
lowerCamelCase : int = None
# 1. compute alphas, betas
lowerCamelCase : str = state.common.alphas_cumprod[t]
lowerCamelCase : Any = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCamelCase : List[str] = 1 - alpha_prod_t
lowerCamelCase : int = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCamelCase : Dict = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCamelCase : List[Any] = model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCamelCase : Any = jnp.clip(UpperCamelCase__ , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase : Any = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCamelCase : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase : str = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCamelCase : Dict = jax.random.split(UpperCamelCase__ , num=1 )
lowerCamelCase : Optional[Any] = jax.random.normal(UpperCamelCase__ , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(UpperCamelCase__ , UpperCamelCase__ , predicted_variance=UpperCamelCase__ ) ** 0.5) * noise
lowerCamelCase : str = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCamelCase : Optional[Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=UpperCamelCase__ , state=UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> jnp.ndarray:
return add_noise_common(state.common , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> jnp.ndarray:
return get_velocity_common(state.common , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __len__( self ) -> List[Any]:
return self.config.num_train_timesteps
| 311 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
lowerCamelCase : Union[str, Any] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(UpperCamelCase__ )
def _lowercase ( self ) -> Tuple:
lowerCamelCase : Tuple = "sshleifer/tiny-gpt2"
lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Dict = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> int:
lowerCamelCase : List[str] = "sgugger/tiny-distilbert-classification"
lowerCamelCase : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , only_pretrain_model=UpperCamelCase__ , )
lowerCamelCase : Tuple = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> int:
lowerCamelCase : Dict = "sshleifer/tiny-gpt2"
lowerCamelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , torchscript=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Union[str, Any] = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Any = "sshleifer/tiny-gpt2"
lowerCamelCase : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , fpaa=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Optional[int] = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> Optional[int]:
lowerCamelCase : int = "sshleifer/tiny-gpt2"
lowerCamelCase : List[str] = AutoConfig.from_pretrained(UpperCamelCase__ )
# set architectures equal to `None`
lowerCamelCase : Union[str, Any] = None
lowerCamelCase : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : List[Any] = PyTorchBenchmark(UpperCamelCase__ , configs=[config] )
lowerCamelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> str:
lowerCamelCase : Optional[int] = "sshleifer/tiny-gpt2"
lowerCamelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : int = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def _lowercase ( self ) -> Dict:
lowerCamelCase : List[str] = "sshleifer/tiny-gpt2"
lowerCamelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , fpaa=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
lowerCamelCase : Dict = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
lowerCamelCase : Union[str, Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Dict = PyTorchBenchmark(UpperCamelCase__ , configs=[config] )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> int:
lowerCamelCase : Tuple = "sshleifer/tinier_bart"
lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Any = PyTorchBenchmark(UpperCamelCase__ , configs=[config] )
lowerCamelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def _lowercase ( self ) -> Any:
lowerCamelCase : List[Any] = "sshleifer/tiny-gpt2"
lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : int = PyTorchBenchmark(UpperCamelCase__ , configs=[config] )
lowerCamelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self ) -> Dict:
lowerCamelCase : List[str] = "sshleifer/tinier_bart"
lowerCamelCase : Optional[Any] = AutoConfig.from_pretrained(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCamelCase__ , )
lowerCamelCase : Tuple = PyTorchBenchmark(UpperCamelCase__ , configs=[config] )
lowerCamelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def _lowercase ( self ) -> Union[str, Any]:
lowerCamelCase : Optional[Any] = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , save_to_csv=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCamelCase__ , "inf_time.csv" ) , train_memory_csv_file=os.path.join(UpperCamelCase__ , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(UpperCamelCase__ , "inf_mem.csv" ) , train_time_csv_file=os.path.join(UpperCamelCase__ , "train_time.csv" ) , env_info_csv_file=os.path.join(UpperCamelCase__ , "env.csv" ) , multi_process=UpperCamelCase__ , )
lowerCamelCase : str = PyTorchBenchmark(UpperCamelCase__ )
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "env.csv" ) ).exists() )
def _lowercase ( self ) -> Dict:
lowerCamelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(UpperCamelCase__ ):
self.assertTrue(hasattr(UpperCamelCase__ , "sequential" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "cumulative" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "current" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=UpperCamelCase__ , inference=UpperCamelCase__ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCamelCase__ , "log.txt" ) , log_print=UpperCamelCase__ , trace_memory_line_by_line=UpperCamelCase__ , multi_process=UpperCamelCase__ , )
lowerCamelCase : Any = PyTorchBenchmark(UpperCamelCase__ )
lowerCamelCase : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(UpperCamelCase__ , "log.txt" ) ).exists() )
| 311 | 1 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
A : str = logging.get_logger(__name__)
@add_end_docstrings(A_)
class _lowercase ( A_):
"""simple docstring"""
def __init__( self : Tuple , **__lowerCamelCase : Optional[Any] ):
'''simple docstring'''
super().__init__(**__lowerCamelCase )
requires_backends(self , "vision" )
requires_backends(self , "torch" )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
self.check_model_type(__lowerCamelCase )
def lowerCAmelCase ( self : Optional[Any] , **__lowerCamelCase : List[str] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : Tuple = {}
lowerCamelCase__ : Dict = {}
# preprocess args
if "points_per_batch" in kwargs:
lowerCamelCase__ : str = kwargs["""points_per_batch"""]
if "points_per_crop" in kwargs:
lowerCamelCase__ : int = kwargs["""points_per_crop"""]
if "crops_n_layers" in kwargs:
lowerCamelCase__ : Optional[Any] = kwargs["""crops_n_layers"""]
if "crop_overlap_ratio" in kwargs:
lowerCamelCase__ : Dict = kwargs["""crop_overlap_ratio"""]
if "crop_n_points_downscale_factor" in kwargs:
lowerCamelCase__ : Dict = kwargs["""crop_n_points_downscale_factor"""]
# postprocess args
if "pred_iou_thresh" in kwargs:
lowerCamelCase__ : List[Any] = kwargs["""pred_iou_thresh"""]
if "stability_score_offset" in kwargs:
lowerCamelCase__ : Optional[int] = kwargs["""stability_score_offset"""]
if "mask_threshold" in kwargs:
lowerCamelCase__ : Optional[int] = kwargs["""mask_threshold"""]
if "stability_score_thresh" in kwargs:
lowerCamelCase__ : List[str] = kwargs["""stability_score_thresh"""]
if "crops_nms_thresh" in kwargs:
lowerCamelCase__ : List[Any] = kwargs["""crops_nms_thresh"""]
if "output_rle_mask" in kwargs:
lowerCamelCase__ : int = kwargs["""output_rle_mask"""]
if "output_bboxes_mask" in kwargs:
lowerCamelCase__ : Tuple = kwargs["""output_bboxes_mask"""]
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : List[Any] , __lowerCamelCase : Any , *__lowerCamelCase : Any , __lowerCamelCase : int=None , __lowerCamelCase : str=None , **__lowerCamelCase : Optional[int] ):
'''simple docstring'''
return super().__call__(__lowerCamelCase , *__lowerCamelCase , num_workers=__lowerCamelCase , batch_size=__lowerCamelCase , **__lowerCamelCase )
def lowerCAmelCase ( self : str , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any]=64 , __lowerCamelCase : Any = 0 , __lowerCamelCase : List[str] = 512 / 1500 , __lowerCamelCase : List[str] = 32 , __lowerCamelCase : Dict = 1 , ):
'''simple docstring'''
lowerCamelCase__ : str = load_image(__lowerCamelCase )
lowerCamelCase__ : Optional[int] = self.image_processor.size["""longest_edge"""]
lowerCamelCase__ : List[str] = self.image_processor.generate_crop_boxes(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : List[str] = self.image_processor(images=__lowerCamelCase , return_tensors="pt" )
with self.device_placement():
if self.framework == "pt":
lowerCamelCase__ : str = self.get_inference_context()
with inference_context():
lowerCamelCase__ : int = self._ensure_tensor_on_device(__lowerCamelCase , device=self.device )
lowerCamelCase__ : List[str] = self.model.get_image_embeddings(model_inputs.pop("pixel_values" ) )
lowerCamelCase__ : int = image_embeddings
lowerCamelCase__ : str = grid_points.shape[1]
lowerCamelCase__ : Optional[int] = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
"Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
"To return all points at once, set points_per_batch to None" )
for i in range(0 , __lowerCamelCase , __lowerCamelCase ):
lowerCamelCase__ : Any = grid_points[:, i : i + points_per_batch, :, :]
lowerCamelCase__ : List[str] = input_labels[:, i : i + points_per_batch]
lowerCamelCase__ : Any = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def lowerCAmelCase ( self : Dict , __lowerCamelCase : List[Any] , __lowerCamelCase : Tuple=0.8_8 , __lowerCamelCase : Tuple=0.9_5 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : Any=1 , ):
'''simple docstring'''
lowerCamelCase__ : Tuple = model_inputs.pop("input_boxes" )
lowerCamelCase__ : List[str] = model_inputs.pop("is_last" )
lowerCamelCase__ : Union[str, Any] = model_inputs.pop("original_sizes" ).tolist()
lowerCamelCase__ : List[str] = model_inputs.pop("reshaped_input_sizes" ).tolist()
lowerCamelCase__ : Any = self.model(**__lowerCamelCase )
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowerCamelCase__ : Optional[int] = model_outputs["""pred_masks"""]
lowerCamelCase__ : int = self.image_processor.post_process_masks(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , binarize=__lowerCamelCase )
lowerCamelCase__ : Dict = model_outputs["""iou_scores"""]
lowerCamelCase__ : List[str] = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def lowerCAmelCase ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple=False , __lowerCamelCase : Tuple=False , __lowerCamelCase : List[Any]=0.7 , ):
'''simple docstring'''
lowerCamelCase__ : int = []
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : List[Any] = []
for model_output in model_outputs:
all_scores.append(model_output.pop("iou_scores" ) )
all_masks.extend(model_output.pop("masks" ) )
all_boxes.append(model_output.pop("boxes" ) )
lowerCamelCase__ : Union[str, Any] = torch.cat(__lowerCamelCase )
lowerCamelCase__ : Any = torch.cat(__lowerCamelCase )
lowerCamelCase__ : Dict = self.image_processor.post_process_for_mask_generation(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Optional[int] = defaultdict(__lowerCamelCase )
for output in model_outputs:
for k, v in output.items():
extra[k].append(__lowerCamelCase )
lowerCamelCase__ : Optional[Any] = {}
if output_rle_mask:
lowerCamelCase__ : Dict = rle_mask
if output_bboxes_mask:
lowerCamelCase__ : str = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 707 |
from __future__ import annotations
import time
import numpy as np
A : Dict = [8, 5, 9, 7]
A : Optional[Any] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
A : Any = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class _lowercase :
"""simple docstring"""
def __init__( self : str , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : list[list[int]] , ):
'''simple docstring'''
lowerCamelCase__ : int = claim_vector
lowerCamelCase__ : str = allocated_resources_table
lowerCamelCase__ : int = maximum_claim_table
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__lowerCamelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return {self.__need().index(__lowerCamelCase ): i for i in self.__need()}
def lowerCAmelCase ( self : List[str] , **__lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = self.__need()
lowerCamelCase__ : str = self.__allocated_resources_table
lowerCamelCase__ : List[Any] = self.__available_resources()
lowerCamelCase__ : str = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
lowerCamelCase__ : int = False
for each_need in need_list:
lowerCamelCase__ : Dict = True
for index, need in enumerate(__lowerCamelCase ):
if need > available_resources[index]:
lowerCamelCase__ : str = False
break
if execution:
lowerCamelCase__ : Tuple = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
lowerCamelCase__ : Any = original_need_index
print(f"Process {process_number + 1} is executing." )
# remove the process run from stack
need_list.remove(__lowerCamelCase )
# update available/freed resources stack
lowerCamelCase__ : Union[str, Any] = np.array(__lowerCamelCase ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(__lowerCamelCase ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f"P{self.__allocated_resources_table.index(__lowerCamelCase ) + 1}"
+ " ".join(f"{it:>8}" for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f"P{self.__maximum_claim_table.index(__lowerCamelCase ) + 1}"
+ " ".join(f"{it:>8}" for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(__lowerCamelCase ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(__lowerCamelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class lowerCamelCase_ :
def __init__( self : List[str] , _A : Union[str, Any] , _A : Tuple=13 , _A : Optional[int]=7 , _A : Optional[Any]=True , _A : Tuple=True , _A : Union[str, Any]=True , _A : Any=99 , _A : Optional[int]=32 , _A : Optional[Any]=5 , _A : Optional[Any]=4 , _A : Tuple=37 , _A : Tuple="gelu" , _A : int=0.1 , _A : Dict=0.1 , _A : Any=512 , _A : Union[str, Any]=16 , _A : Optional[int]=2 , _A : List[str]=0.0_2 , _A : Optional[Any]=3 , _A : Dict=4 , _A : int=None , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : Optional[int] = seq_length
UpperCAmelCase__ : Union[str, Any] = is_training
UpperCAmelCase__ : Any = use_token_type_ids
UpperCAmelCase__ : Any = use_labels
UpperCAmelCase__ : Tuple = vocab_size
UpperCAmelCase__ : Optional[int] = hidden_size
UpperCAmelCase__ : Union[str, Any] = num_hidden_layers
UpperCAmelCase__ : List[Any] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : int = hidden_act
UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase__ : Dict = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : Dict = initializer_range
UpperCAmelCase__ : Dict = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : int = scope
UpperCAmelCase__ : List[Any] = self.vocab_size - 1
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ : Optional[int] = None
if self.use_token_type_ids:
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Any = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ : str = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ : Optional[int] = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
UpperCAmelCase__ : str = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase_ ( self : Tuple , _A : Tuple , _A : Optional[int] , _A : int , _A : Tuple , *_A : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = OpenAIGPTModel(config=_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Optional[int] = model(_A , token_type_ids=_A , head_mask=_A )
UpperCAmelCase__ : Union[str, Any] = model(_A , token_type_ids=_A )
UpperCAmelCase__ : Optional[int] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : str , _A : Dict , _A : List[str] , _A : int , *_A : str ):
'''simple docstring'''
UpperCAmelCase__ : Any = OpenAIGPTLMHeadModel(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[str] = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : Dict , _A : Dict , _A : List[Any] , *_A : str ):
'''simple docstring'''
UpperCAmelCase__ : Dict = OpenAIGPTDoubleHeadsModel(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : str , _A : int , _A : Tuple , _A : Optional[int] , _A : Tuple , *_A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : List[str] = OpenAIGPTForSequenceClassification(_A )
model.to(_A )
model.eval()
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : str = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Any = self.prepare_config_and_inputs()
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : List[str] = config_and_inputs
UpperCAmelCase__ : Union[str, Any] = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCAmelCase__ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase_ ( self : str , _A : Tuple , _A : Union[str, Any] , _A : Tuple , _A : Tuple , _A : Dict ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowercase_ ( self : Union[str, Any] , _A : List[Any] , _A : int , _A : Union[str, Any]=False ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
UpperCAmelCase__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=_A , )
UpperCAmelCase__ : List[str] = inputs_dict['''labels''']
UpperCAmelCase__ : Optional[Any] = inputs_dict['''labels''']
UpperCAmelCase__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=_A , )
UpperCAmelCase__ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = OpenAIGPTModelTester(self )
UpperCAmelCase__ : int = ConfigTester(self , config_class=_A , n_embd=37 )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_A )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_A )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_A )
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(_A )
UpperCAmelCase__ : List[str] = torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=_A ) # the president is
UpperCAmelCase__ : str = [
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
UpperCAmelCase__ : Tuple = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].tolist() , _A )
| 75 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowerCAmelCase__ ( ):
__a : Any = torch.nn.Linear(2 , 4 )
__a : int = torch.optim.AdamW(model.parameters() , lr=1.0 )
__a : Tuple = torch.optim.lr_scheduler.OneCycleLR(SCREAMING_SNAKE_CASE__ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__a : Union[str, Any] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__a : str = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
__a : List[Any] = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
class lowerCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
@require_cuda
def __magic_name__ ( self ) -> Optional[Any]:
__a : Optional[Any] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(_A ):
__a : Optional[Any] = Accelerator(cpu=_A )
def __magic_name__ ( self ) -> Union[str, Any]:
__a : Optional[Any] = Accelerator()
__a : List[str] = GradientState()
assert state.num_steps == 1
__a : Tuple = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__a : List[str] = False
assert state.sync_gradients is False
GradientState._reset_state()
def __magic_name__ ( self ) -> Optional[int]:
__a : Any = Accelerator()
__a , __a , __a , __a , __a : Optional[int] = create_components()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : Optional[Any] = accelerator.prepare(_A , _A , _A , _A , _A )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __magic_name__ ( self ) -> str:
__a : Union[str, Any] = Accelerator()
__a , __a , __a , __a , __a : List[Any] = create_components()
accelerator.prepare(_A , _A , _A , _A , _A )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __magic_name__ ( self ) -> Optional[int]:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*_A , **_A ):
pass
with patch('torch.cuda.set_device' , _A ), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64' ):
__a : Optional[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , 'cuda:64' )
def __magic_name__ ( self ) -> Tuple:
__a : Optional[int] = Accelerator()
__a , __a , __a , __a , __a : str = create_components()
accelerator.prepare(_A , _A , _A , _A , _A )
__a : str = get_signature(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_A )
# make sure random weights don't match
load_random_weights(_A )
self.assertTrue(abs(model_signature - get_signature(_A ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(_A )
self.assertTrue(abs(model_signature - get_signature(_A ) ) < 1E-3 )
def __magic_name__ ( self ) -> Optional[int]:
__a : List[Any] = Accelerator()
__a , __a , __a , __a , __a : Any = create_components()
accelerator.prepare(_A , _A , _A , _A , _A )
__a : Any = get_signature(_A )
# saving hook
def save_config(_A , _A , _A ):
__a : Any = {'class_name': models[0].__class__.__name__}
with open(os.path.join(_A , 'data.json' ) , 'w' ) as f:
json.dump(_A , _A )
# loading hook
def load_config(_A , _A ):
with open(os.path.join(_A , 'data.json' ) , 'r' ) as f:
__a : Tuple = json.load(_A )
__a : Union[str, Any] = config['class_name']
__a : List[Any] = accelerator.register_save_state_pre_hook(_A )
__a : str = accelerator.register_load_state_pre_hook(_A )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_A )
# make sure random weights don't match with hooks
load_random_weights(_A )
self.assertTrue(abs(model_signature - get_signature(_A ) ) > 1E-3 )
# random class name to verify correct one is loaded
__a : Dict = 'random'
# make sure loaded weights match with hooks
accelerator.load_state(_A )
self.assertTrue(abs(model_signature - get_signature(_A ) ) < 1E-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(_A )
# make sure random weights don't match with hooks removed
load_random_weights(_A )
self.assertTrue(abs(model_signature - get_signature(_A ) ) > 1E-3 )
# random class name to verify correct one is loaded
__a : Union[str, Any] = 'random'
# make sure loaded weights match with hooks removed
accelerator.load_state(_A )
self.assertTrue(abs(model_signature - get_signature(_A ) ) < 1E-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__a : str = Accelerator()
__a , __a , __a , __a , __a : Union[str, Any] = create_components()
__a : Tuple = None
# This should work
__a , __a , __a , __a , __a , __a : str = accelerator.prepare(
_A , _A , _A , _A , _A , _A )
self.assertTrue(dummy_obj is None )
def __magic_name__ ( self ) -> Dict:
__a : Tuple = Accelerator()
__a , __a , __a , __a , __a : List[Any] = create_components()
__a : str = [1, 2, 3]
# This should work
__a , __a , __a , __a , __a , __a : int = accelerator.prepare(
_A , _A , _A , _A , _A , _A )
self.assertEqual(
getattr(_A , '_is_accelerate_prepared' , _A ) , _A , 'Dummy object should have `_is_accelerate_prepared` set to `True`' , )
self.assertEqual(
getattr(_A , '_is_accelerate_prepared' , _A ) , _A , 'Model is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_A , '_is_accelerate_prepared' , _A ) , _A , 'Optimizer is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_A , '_is_accelerate_prepared' , _A ) , _A , 'Scheduler is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_A , '_is_accelerate_prepared' , _A ) , _A , 'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
self.assertEqual(
getattr(_A , '_is_accelerate_prepared' , _A ) , _A , 'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`' , )
@slow
@require_bnb
def __magic_name__ ( self ) -> Dict:
from transformers import AutoModelForCausalLM
__a : str = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=_A , device_map={'': 0} , )
__a : Tuple = Accelerator()
# This should work
__a : int = accelerator.prepare(_A )
@slow
@require_bnb
def __magic_name__ ( self ) -> Dict:
from transformers import AutoModelForCausalLM
__a : List[Any] = Accelerator()
with init_empty_weights():
__a : str = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
__a : Optional[Any] = infer_auto_device_map(_A )
__a : List[str] = 'cpu'
__a : List[Any] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , device_map=_A , load_in_abit=_A , llm_inta_enable_fpaa_cpu_offload=_A )
# This should not work and get value error
with self.assertRaises(_A ):
__a : int = accelerator.prepare(_A )
@slow
@require_bnb
@require_multi_gpu
def __magic_name__ ( self ) -> Any:
from transformers import AutoModelForCausalLM
__a : str = {'distributed_type': DistributedType.MULTI_GPU}
with init_empty_weights():
__a : Tuple = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
model.tie_weights()
__a : Dict = infer_auto_device_map(_A )
__a : Optional[Any] = 1
__a : List[str] = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=_A , device_map=_A , )
__a : Optional[int] = Accelerator()
# This should not work and get value error
with self.assertRaises(_A ):
__a : str = accelerator.prepare(_A )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __magic_name__ ( self ) -> int:
from transformers import AutoModelForCausalLM
with init_empty_weights():
__a : int = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , )
__a : str = infer_auto_device_map(_A )
__a : Dict = 1
__a : int = AutoModelForCausalLM.from_pretrained(
'EleutherAI/gpt-neo-125m' , load_in_abit=_A , device_map=_A , )
__a : List[str] = Accelerator()
# This should work
__a : List[str] = accelerator.prepare(_A )
@require_cuda
def __magic_name__ ( self ) -> Dict:
__a : List[str] = torch.nn.Linear(10 , 10 )
__a : Optional[Any] = torch.optim.SGD(model.parameters() , lr=0.01 )
__a : Any = Accelerator(cpu=_A )
__a : Optional[Any] = accelerator.prepare(_A )
| 597 | 0 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def __UpperCamelCase ( a : dict , a : str , a : set , a : set , a : dict , a : dict , a : PriorityQueue , a : dict , a : float | int , ) ->float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case = cst_fwd.get(a , np.inf )
snake_case = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case = new_cost_f
snake_case = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __UpperCamelCase ( a : str , a : str , a : dict , a : dict ) ->int:
snake_case = -1
snake_case = set()
snake_case = set()
snake_case = {source: 0}
snake_case = {destination: 0}
snake_case = {source: None}
snake_case = {destination: None}
snake_case = PriorityQueue()
snake_case = PriorityQueue()
snake_case = np.inf
queue_forward.put((0, source) )
queue_backward.put((0, destination) )
if source == destination:
return 0
while not queue_forward.empty() and not queue_backward.empty():
snake_case , snake_case = queue_forward.get()
visited_forward.add(a )
snake_case , snake_case = queue_backward.get()
visited_backward.add(a )
snake_case = pass_and_relaxation(
a , a , a , a , a , a , a , a , a , )
snake_case = pass_and_relaxation(
a , a , a , a , a , a , a , a , a , )
if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
break
if shortest_distance != np.inf:
snake_case = shortest_distance
return shortest_path_distance
_lowercase = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
_lowercase = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 44 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCAmelCase : Optional[Any] ='''\
'''
lowerCAmelCase : Dict ='''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
lowerCAmelCase : str ='''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def lowercase__ ( self : Tuple ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def lowercase__ ( self : List[Any] , lowercase : Optional[Any] , lowercase : List[Any] , lowercase : int = 16 , lowercase : bool = True , lowercase : Any=None ):
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
lowercase_ :Dict = "cuda"
else:
lowercase_ :Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu"
lowercase_ :Union[str, Any] = AutoModelForCausalLM.from_pretrained(lowercase )
lowercase_ :Dict = model.to(lowercase )
lowercase_ :Any = AutoTokenizer.from_pretrained(lowercase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
lowercase_ :Any = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowercase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
lowercase_ :Any = model.config.max_length - 1
else:
lowercase_ :int = model.config.max_length
lowercase_ :Tuple = tokenizer(
lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors="pt" , return_attention_mask=lowercase , ).to(lowercase )
lowercase_ :int = encodings["input_ids"]
lowercase_ :List[str] = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
lowercase_ :Optional[int] = []
lowercase_ :List[str] = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(lowercase ) , lowercase ) ):
lowercase_ :Tuple = min(start_index + batch_size , len(lowercase ) )
lowercase_ :Tuple = encoded_texts[start_index:end_index]
lowercase_ :Optional[Any] = attn_masks[start_index:end_index]
if add_start_token:
lowercase_ :Optional[Any] = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowercase )
lowercase_ :List[str] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
lowercase_ :int = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(lowercase ), attn_mask] , dim=1 )
lowercase_ :List[Any] = encoded_batch
with torch.no_grad():
lowercase_ :int = model(lowercase , attention_mask=lowercase ).logits
lowercase_ :Optional[Any] = out_logits[..., :-1, :].contiguous()
lowercase_ :int = labels[..., 1:].contiguous()
lowercase_ :List[Any] = attn_mask[..., 1:].contiguous()
lowercase_ :str = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , lowercase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowercase )}
| 172 |
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
lowerCAmelCase : Optional[Any] =TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
lowerCAmelCase : Optional[int] =[]
lowerCAmelCase : int =[]
lowerCAmelCase : List[Any] ={'''type''': '''section''', '''text''': {'''type''': '''plain_text''', '''text''': '''No failed tests! 🤗''', '''emoji''': True}}
lowerCAmelCase : List[str] =[
{
'''type''': '''header''',
'''text''': {
'''type''': '''plain_text''',
'''text''': F'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''',
'''emoji''': True,
},
}
]
lowerCAmelCase : int =0
for log in Path().glob('''*.log'''):
lowerCAmelCase : Any =0
with open(log, '''r''') as f:
for line in f:
lowerCAmelCase : List[str] =json.loads(line)
if line.get('''nodeid''', '''''') != "":
lowerCAmelCase : Tuple =line['''nodeid''']
if line.get('''duration''', None) is not None:
lowerCAmelCase : List[Any] =F'''{line['duration']:.4f}'''
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
lowerCAmelCase : List[Any] =[]
log.unlink()
lowerCAmelCase : Dict =''''''
lowerCAmelCase : List[str] =[]
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
lowerCAmelCase : Dict =[]
lowerCAmelCase : int ={}
for test in failed_tests:
lowerCAmelCase : Tuple =test[0].split('''::''')
lowerCAmelCase : str =data[0].split('''/''')[-1]
if data[0] not in filesafailed:
lowerCAmelCase : Optional[int] =[data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
lowerCAmelCase : str =[test[0] for test in failed_table]
lowerCAmelCase : int =list(set(files))
# Count number of instances in failed_tests
lowerCAmelCase : Tuple =[]
for file in individual_files:
table.append([file, len(filesafailed[file])])
lowerCAmelCase : Optional[Any] =tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
lowerCAmelCase : Dict ='''Too many failed tests, please see the full report in the Action results.'''
lowerCAmelCase : List[str] =len(err) + 10
lowerCAmelCase : Tuple =message[: 3_000 - offset] + F'''\n...\n```\n{err}'''
print(F'''### {message}''')
else:
lowerCAmelCase : Dict ='''No failed tests! 🤗'''
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
lowerCAmelCase : Any =WebClient(token=os.environ['''SLACK_API_TOKEN'''])
if message != "No failed tests! 🤗":
lowerCAmelCase : Any ={
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': message,
},
}
payload.append(md_report)
lowerCAmelCase : int ={
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': '''*For more details:*''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''Check Action results''',
'''emoji''': True,
},
'''url''': F'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
payload.append(action_button)
lowerCAmelCase : List[Any] ={
'''type''': '''context''',
'''elements''': [
{
'''type''': '''plain_text''',
'''text''': F'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''',
}
],
}
payload.append(date_report)
lowerCAmelCase : Any =client.chat_postMessage(channel='''#accelerate-ci-daily''', text=message, blocks=payload)
lowerCAmelCase : str =response.data['''ts''']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowerCAmelCase : Tuple =''''''
for i, row in enumerate(test_failures):
if row[0] != test_class:
lowerCAmelCase : Tuple =row[0]
else:
lowerCAmelCase : Tuple =''''''
lowerCAmelCase : Any ={
'''type''': '''section''',
'''text''': {
'''type''': '''mrkdwn''',
'''text''': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
| 172 | 1 |
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = len(snake_case_ )
while cur > 1:
# Find the maximum number in arr
__SCREAMING_SNAKE_CASE = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__SCREAMING_SNAKE_CASE = arr[mi::-1] + arr[mi + 1 : len(snake_case_ )]
# Reverse whole list
__SCREAMING_SNAKE_CASE = arr[cur - 1 :: -1] + arr[cur : len(snake_case_ )]
cur -= 1
return arr
if __name__ == "__main__":
__magic_name__ = input("Enter numbers separated by a comma:\n").strip()
__magic_name__ = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 719 |
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__magic_name__ = re.compile(R"\s+")
def _lowerCAmelCase ( UpperCamelCase_ ):
return {"hash": hashlib.mda(re.sub(UpperCamelCase_ , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()}
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = [len(UpperCamelCase_ ) for line in example["""content"""].splitlines()]
return {"line_mean": np.mean(UpperCamelCase_ ), "line_max": max(UpperCamelCase_ )}
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = np.mean([c.isalnum() for c in example["""content"""]] )
return {"alpha_frac": alpha_frac}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ):
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=5 ):
__SCREAMING_SNAKE_CASE = ["""auto-generated""", """autogenerated""", """automatically generated"""]
__SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
for _, line in zip(range(UpperCamelCase_ ) , UpperCamelCase_ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=5 , UpperCamelCase_=0.05 ):
__SCREAMING_SNAKE_CASE = ["""unit tests""", """test file""", """configuration file"""]
__SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
# first test
for _, line in zip(range(UpperCamelCase_ ) , UpperCamelCase_ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
__SCREAMING_SNAKE_CASE = example["""content"""].count("""\n""" )
__SCREAMING_SNAKE_CASE = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = ["""def """, """class """, """for """, """while """]
__SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_=4 ):
__SCREAMING_SNAKE_CASE = example["""content"""].splitlines()
__SCREAMING_SNAKE_CASE = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = tokenizer(example["""content"""] , truncation=UpperCamelCase_ )["""input_ids"""]
__SCREAMING_SNAKE_CASE = len(example["""content"""] ) / len(UpperCamelCase_ )
return {"ratio": ratio}
def _lowerCAmelCase ( UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = {}
results.update(get_hash(UpperCamelCase_ ) )
results.update(line_stats(UpperCamelCase_ ) )
results.update(alpha_stats(UpperCamelCase_ ) )
results.update(char_token_ratio(UpperCamelCase_ ) )
results.update(is_autogenerated(UpperCamelCase_ ) )
results.update(is_config_or_test(UpperCamelCase_ ) )
results.update(has_no_keywords(UpperCamelCase_ ) )
results.update(has_few_assignments(UpperCamelCase_ ) )
return results
def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if not check_uniques(UpperCamelCase_ , UpperCamelCase_ ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def _lowerCAmelCase ( UpperCamelCase_ ):
with open(UpperCamelCase_ , """rb""" ) as f_in:
with gzip.open(str(UpperCamelCase_ ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
os.unlink(UpperCamelCase_ )
# Settings
__magic_name__ = HfArgumentParser(PreprocessingArguments)
__magic_name__ = parser.parse_args()
if args.num_workers is None:
__magic_name__ = multiprocessing.cpu_count()
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__magic_name__ = time.time()
__magic_name__ = load_dataset(args.dataset_name, split="train")
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
__magic_name__ = time.time()
__magic_name__ = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
__magic_name__ = set(ds.unique("hash"))
__magic_name__ = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
__magic_name__ = time.time()
__magic_name__ = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
__magic_name__ = time.time()
__magic_name__, __magic_name__ = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
__magic_name__ = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / "duplicate_clusters.json", "w") as f:
json.dump(duplicate_clusters, f)
__magic_name__ = output_dir / "data"
data_dir.mkdir(exist_ok=True)
__magic_name__ = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
__magic_name__ = str(data_dir / F"""file-{file_number+1:012}.json""")
__magic_name__ = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
| 248 | 0 |
"""simple docstring"""
from __future__ import annotations
def __magic_name__ ( UpperCamelCase : list[list[int]] ) -> int:
# preprocessing the first row
for i in range(1 , len(matrix[0] ) ):
matrix[0][i] += matrix[0][i - 1]
# preprocessing the first column
for i in range(1 , len(UpperCamelCase ) ):
matrix[i][0] += matrix[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(UpperCamelCase ) ):
for j in range(1 , len(matrix[0] ) ):
matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 273 |
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a : Optional[Any] = False
a : str = False
def __magic_name__ ( UpperCamelCase : Namespace ) -> Optional[int]:
return TrainCommand(UpperCamelCase )
class lowercase(_lowercase ):
@staticmethod
def lowercase__ ( __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
a__ = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=__SCREAMING_SNAKE_CASE , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=__SCREAMING_SNAKE_CASE , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=__SCREAMING_SNAKE_CASE , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=__SCREAMING_SNAKE_CASE , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=__SCREAMING_SNAKE_CASE , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=__SCREAMING_SNAKE_CASE , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=__SCREAMING_SNAKE_CASE , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=__SCREAMING_SNAKE_CASE , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=__SCREAMING_SNAKE_CASE , default=3_2 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=__SCREAMING_SNAKE_CASE , default=6_4 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=__SCREAMING_SNAKE_CASE , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=__SCREAMING_SNAKE_CASE , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=__SCREAMING_SNAKE_CASE )
def __init__( self , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
a__ = logging.get_logger('transformers-cli/training' )
a__ = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=__SCREAMING_SNAKE_CASE )
a__ = args.output
a__ = args.column_label
a__ = args.column_text
a__ = args.column_id
self.logger.info(f'Loading {args.task} pipeline for {args.model}' )
if args.task == "text_classification":
a__ = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f'Loading dataset from {args.train_data}' )
a__ = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
a__ = None
if args.validation_data:
self.logger.info(f'Loading validation dataset from {args.validation_data}' )
a__ = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
a__ = args.validation_split
a__ = args.train_batch_size
a__ = args.valid_batch_size
a__ = args.learning_rate
a__ = args.adam_epsilon
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def lowercase__ ( self ) -> Any:
"""simple docstring"""
raise NotImplementedError
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 273 | 1 |
def __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = [0 for i in range(r + 1 )]
# nc0 = 1
__UpperCAmelCase : List[Any] = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
__UpperCAmelCase : Any = min(lowercase_ , lowercase_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=10, r=5))
| 705 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=3 , lowercase__=1_8 , lowercase__=3_0 , lowercase__=4_0_0 , lowercase__=True , lowercase__=None , lowercase__=True , ):
__UpperCAmelCase : Union[str, Any] = size if size is not None else {'''height''': 1_8, '''width''': 1_8}
__UpperCAmelCase : Any = parent
__UpperCAmelCase : Dict = batch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : int = image_size
__UpperCAmelCase : Tuple = min_resolution
__UpperCAmelCase : str = max_resolution
__UpperCAmelCase : Optional[int] = do_resize
__UpperCAmelCase : Tuple = size
__UpperCAmelCase : Union[str, Any] = do_normalize
def A( self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
]),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Dict = ImageGPTImageProcessor if is_vision_available() else None
def A( self):
__UpperCAmelCase : Optional[Any] = ImageGPTImageProcessingTester(self)
@property
def A( self):
return self.image_processor_tester.prepare_image_processor_dict()
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , '''clusters'''))
self.assertTrue(hasattr(lowercase__ , '''do_resize'''))
self.assertTrue(hasattr(lowercase__ , '''size'''))
self.assertTrue(hasattr(lowercase__ , '''do_normalize'''))
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8})
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2)
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2})
def A( self):
__UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict)
__UpperCAmelCase : Any = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , obj[key]))
else:
self.assertEqual(obj[key] , lowercase__)
def A( self):
__UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Dict = os.path.join(lowercase__ , '''image_processor.json''')
image_processor_first.to_json_file(lowercase__)
__UpperCAmelCase : Union[str, Any] = self.image_processing_class.from_json_file(lowercase__).to_dict()
__UpperCAmelCase : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
def A( self):
__UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase__)
__UpperCAmelCase : Dict = self.image_processing_class.from_pretrained(lowercase__).to_dict()
__UpperCAmelCase : Optional[Any] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase__ , image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key] , lowercase__)
@unittest.skip('''ImageGPT requires clusters at initialization''')
def A( self):
pass
def __SCREAMING_SNAKE_CASE ( ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = load_dataset('''hf-internal-testing/fixtures_image_utils''' , split='''test''' )
__UpperCAmelCase : Optional[Any] = Image.open(dataset[4]['''file'''] )
__UpperCAmelCase : Optional[int] = Image.open(dataset[5]['''file'''] )
__UpperCAmelCase : int = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def A( self):
__UpperCAmelCase : int = ImageGPTImageProcessor.from_pretrained('''openai/imagegpt-small''')
__UpperCAmelCase : Any = prepare_images()
# test non-batched
__UpperCAmelCase : int = image_processing(images[0] , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4))
__UpperCAmelCase : int = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase__)
# test batched
__UpperCAmelCase : int = image_processing(lowercase__ , return_tensors='''pt''')
self.assertIsInstance(encoding.input_ids , torch.LongTensor)
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4))
__UpperCAmelCase : Any = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase__)
| 675 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = CanineTokenizer
__SCREAMING_SNAKE_CASE = False
def __lowerCamelCase ( self ) -> Optional[int]:
super().setUp()
__UpperCamelCase = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCamelCase ( self ) -> Optional[Any]:
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def __lowerCamelCase ( self , **lowercase ) -> CanineTokenizer:
__UpperCamelCase = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )
__UpperCamelCase = 1_0_2_4
return tokenizer
@require_torch
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = self.canine_tokenizer
__UpperCamelCase = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
__UpperCamelCase = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0]
# fmt: on
__UpperCamelCase = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" )
self.assertIsInstance(lowercase , lowercase )
__UpperCamelCase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(lowercase , lowercase )
self.assertEqual((2, 3_9) , batch.input_ids.shape )
self.assertEqual((2, 3_9) , batch.attention_mask.shape )
@require_torch
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = self.canine_tokenizer
__UpperCamelCase = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
__UpperCamelCase = tokenizer(lowercase , padding=lowercase , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , lowercase )
self.assertIn("""attention_mask""" , lowercase )
self.assertIn("""token_type_ids""" , lowercase )
@require_torch
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.canine_tokenizer
__UpperCamelCase = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
__UpperCamelCase = tokenizer(
text_target=lowercase , max_length=3_2 , padding="""max_length""" , truncation=lowercase , return_tensors="""pt""" )
self.assertEqual(3_2 , targets["""input_ids"""].shape[1] )
def __lowerCamelCase ( self ) -> Union[str, Any]:
# safety check on max_len default value so we are sure the test works
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length , 4_2 )
# Now let's start the test
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = """ He is very happy, UNwant\u00E9d,running"""
__UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
__UpperCamelCase = tokenizer.__class__.from_pretrained(lowercase )
__UpperCamelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
shutil.rmtree(lowercase )
__UpperCamelCase = self.get_tokenizers(model_max_length=4_2 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__UpperCamelCase = tempfile.mkdtemp()
__UpperCamelCase = """ He is very happy, UNwant\u00E9d,running"""
__UpperCamelCase = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__UpperCamelCase = chr(0Xe007 )
additional_special_tokens.append(lowercase )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
__UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
tokenizer.save_pretrained(lowercase )
__UpperCamelCase = tokenizer.__class__.from_pretrained(lowercase )
__UpperCamelCase = after_tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertListEqual(lowercase , lowercase )
self.assertIn(lowercase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 4_2 )
__UpperCamelCase = tokenizer.__class__.from_pretrained(lowercase , model_max_length=4_3 )
self.assertEqual(tokenizer.model_max_length , 4_3 )
shutil.rmtree(lowercase )
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__UpperCamelCase , __UpperCamelCase = self.get_clean_sequence(lowercase )
# a special token for Canine can be defined as follows:
__UpperCamelCase = 0Xe005
__UpperCamelCase = chr(lowercase )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
__UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertEqual(len(lowercase ) , 1 )
__UpperCamelCase = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowercase )
__UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
self.assertEqual(lowercase , input_encoded + special_token_id )
__UpperCamelCase = tokenizer.decode(lowercase , skip_special_tokens=lowercase )
self.assertTrue(special_token not in decoded )
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__UpperCamelCase = chr(0Xe005 )
__UpperCamelCase = chr(0Xe006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowercase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
__UpperCamelCase = tokenizer.tokenize(lowercase )
__UpperCamelCase = tokenizer.tokenize(lowercase )
self.assertEqual(len(lowercase ) , 1 )
self.assertEqual(len(lowercase ) , 1 )
self.assertEqual(token_a[0] , lowercase )
self.assertEqual(token_a[0] , lowercase )
@require_tokenizers
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
__UpperCamelCase = 0Xe006
__UpperCamelCase = chr(lowercase )
__UpperCamelCase = AddedToken(lowercase , lstrip=lowercase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(lowercase )
tokenizer.from_pretrained(lowercase )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowercase )
with open(os.path.join(lowercase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
__UpperCamelCase = json.load(lowercase )
with open(os.path.join(lowercase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
__UpperCamelCase = json.load(lowercase )
# a special token for Canine can be defined as follows:
__UpperCamelCase = 0Xe006
__UpperCamelCase = chr(lowercase )
__UpperCamelCase = [new_token_a]
__UpperCamelCase = [new_token_a]
with open(os.path.join(lowercase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowercase , lowercase )
with open(os.path.join(lowercase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(lowercase , lowercase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__UpperCamelCase = tokenizer_class.from_pretrained(lowercase , extra_ids=0 )
self.assertIn(lowercase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
__UpperCamelCase = 0Xe007
__UpperCamelCase = chr(lowercase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__UpperCamelCase = [AddedToken(lowercase , lstrip=lowercase )]
__UpperCamelCase = tokenizer_class.from_pretrained(
lowercase , additional_special_tokens=lowercase , extra_ids=0 )
self.assertIn(lowercase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.get_tokenizers(do_lower_case=lowercase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__UpperCamelCase = """hello world"""
if self.space_between_special_tokens:
__UpperCamelCase = """[CLS] hello world [SEP]"""
else:
__UpperCamelCase = input
__UpperCamelCase = tokenizer.encode(lowercase , add_special_tokens=lowercase )
__UpperCamelCase = tokenizer.decode(lowercase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(lowercase , [output, output.lower()] )
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__UpperCamelCase = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
__UpperCamelCase = """a"""
__UpperCamelCase = ord(lowercase )
for attr in attributes_list:
setattr(lowercase , attr + """_id""" , lowercase )
self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
self.assertEqual(getattr(lowercase , attr + """_id""" ) , lowercase )
setattr(lowercase , attr + """_id""" , lowercase )
self.assertEqual(getattr(lowercase , lowercase ) , lowercase )
self.assertEqual(getattr(lowercase , attr + """_id""" ) , lowercase )
setattr(lowercase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens_ids""" ) , [] )
__UpperCamelCase = 0Xe006
__UpperCamelCase = chr(lowercase )
setattr(lowercase , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(lowercase , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def __lowerCamelCase ( self ) -> Any:
pass
def __lowerCamelCase ( self ) -> Any:
pass
def __lowerCamelCase ( self ) -> Any:
pass
def __lowerCamelCase ( self ) -> Union[str, Any]:
pass
def __lowerCamelCase ( self ) -> Dict:
pass
def __lowerCamelCase ( self ) -> List[str]:
pass
def __lowerCamelCase ( self ) -> Union[str, Any]:
pass
def __lowerCamelCase ( self ) -> Optional[Any]:
pass
| 601 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : Dict = {'vocab_file': 'vocab.txt'}
a__ : Dict = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a__ : Dict = {
'YituTech/conv-bert-base': 5_1_2,
'YituTech/conv-bert-medium-small': 5_1_2,
'YituTech/conv-bert-small': 5_1_2,
}
a__ : Optional[Any] = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ConvBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> List[str]:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
__UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowercase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowercase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowercase ) != tokenize_chinese_chars
):
__UpperCamelCase = getattr(lowercase , normalizer_state.pop("""type""" ) )
__UpperCamelCase = do_lower_case
__UpperCamelCase = strip_accents
__UpperCamelCase = tokenize_chinese_chars
__UpperCamelCase = normalizer_class(**lowercase )
__UpperCamelCase = do_lower_case
def __lowerCamelCase ( self , lowercase , lowercase=None ) -> List[Any]:
__UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> List[int]:
__UpperCamelCase = [self.sep_token_id]
__UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self , lowercase , lowercase = None ) -> Tuple[str]:
__UpperCamelCase = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
| 601 | 1 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
_lowerCamelCase = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Union[str, Any] ):
if isinstance(UpperCamelCase__ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase__ , PIL.Image.Image ):
SCREAMING_SNAKE_CASE__ = [image]
SCREAMING_SNAKE_CASE__ = [trans(img.convert("""RGB""" ) ) for img in image]
SCREAMING_SNAKE_CASE__ = torch.stack(UpperCamelCase__ )
return image
class UpperCamelCase_ ( UpperCamelCase__ ):
def __init__( self :Dict , __A :int , __A :List[str] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
# make sure scheduler can always be converted to DDIM
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__A , scheduler=__A )
def _snake_case ( self :str , __A :Optional[Any] ) -> Any:
"""simple docstring"""
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should in [0.0, 1.0] but is {strength}''' )
def _snake_case ( self :Union[str, Any] , __A :List[Any] , __A :Any , __A :Optional[int] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = min(int(num_inference_steps * strength ) , __A )
SCREAMING_SNAKE_CASE__ = max(num_inference_steps - init_timestep , 0 )
SCREAMING_SNAKE_CASE__ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _snake_case ( self :List[str] , __A :Optional[int] , __A :Any , __A :List[Any] , __A :Any , __A :Dict , __A :Union[str, Any]=None ) -> Any:
"""simple docstring"""
if not isinstance(__A , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__A )}''' )
SCREAMING_SNAKE_CASE__ = image.to(device=__A , dtype=__A )
if isinstance(__A , __A ) and len(__A ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(__A )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
SCREAMING_SNAKE_CASE__ = init_latents.shape
SCREAMING_SNAKE_CASE__ = randn_tensor(__A , generator=__A , device=__A , dtype=__A )
# get latents
print("""add noise to latents at timestep""" , __A )
SCREAMING_SNAKE_CASE__ = self.scheduler.add_noise(__A , __A , __A )
SCREAMING_SNAKE_CASE__ = init_latents
return latents
@torch.no_grad()
def __call__( self :Any , __A :Union[torch.FloatTensor, PIL.Image.Image] = None , __A :float = 0.8 , __A :int = 1 , __A :Optional[Union[torch.Generator, List[torch.Generator]]] = None , __A :float = 0.0 , __A :int = 50 , __A :Optional[bool] = None , __A :Optional[str] = "pil" , __A :bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
"""simple docstring"""
self.check_inputs(__A )
# 2. Preprocess image
SCREAMING_SNAKE_CASE__ = preprocess(__A )
# 3. set timesteps
self.scheduler.set_timesteps(__A , device=self.device )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.get_timesteps(__A , __A , self.device )
SCREAMING_SNAKE_CASE__ = timesteps[:1].repeat(__A )
# 4. Prepare latent variables
SCREAMING_SNAKE_CASE__ = self.prepare_latents(__A , __A , __A , self.unet.dtype , self.device , __A )
SCREAMING_SNAKE_CASE__ = latents
# 5. Denoising loop
for t in self.progress_bar(__A ):
# 1. predict noise model_output
SCREAMING_SNAKE_CASE__ = self.unet(__A , __A ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
SCREAMING_SNAKE_CASE__ = self.scheduler.step(
__A , __A , __A , eta=__A , use_clipped_model_output=__A , generator=__A , ).prev_sample
SCREAMING_SNAKE_CASE__ = (image / 2 + 0.5).clamp(0 , 1 )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(__A )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__A ) | 59 |
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: str , UpperCamelCase__: str , UpperCamelCase__: str ):
def get_masked_lm_array(UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_array(UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_layer_array(UpperCamelCase__: int , UpperCamelCase__: str ):
SCREAMING_SNAKE_CASE__ = f'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
def get_encoder_attention_layer_array(UpperCamelCase__: int , UpperCamelCase__: str , UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = f'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE'''
SCREAMING_SNAKE_CASE__ = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = array.reshape(UpperCamelCase__ )
if "kernel" in name:
SCREAMING_SNAKE_CASE__ = array.transpose()
return torch.from_numpy(UpperCamelCase__ )
print(f'''Loading model based on config from {config_path}...''' )
SCREAMING_SNAKE_CASE__ = BertConfig.from_json_file(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = BertForMaskedLM(UpperCamelCase__ )
# Layers
for layer_index in range(0 , config.num_hidden_layers ):
SCREAMING_SNAKE_CASE__ = model.bert.encoder.layer[layer_index]
# Self-attention
SCREAMING_SNAKE_CASE__ = layer.attention.self
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_query_dense/kernel""" , self_attn.query.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_query_dense/bias""" , self_attn.query.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_key_dense/kernel""" , self_attn.key.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_key_dense/bias""" , self_attn.key.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_value_dense/kernel""" , self_attn.value.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_value_dense/bias""" , self_attn.value.bias.data.shape )
# Self-attention Output
SCREAMING_SNAKE_CASE__ = layer.attention.output
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_output_dense/kernel""" , self_output.dense.weight.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_attention_layer_array(
UpperCamelCase__ , """_output_dense/bias""" , self_output.dense.bias.data.shape )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_attention_layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_attention_layer_norm/beta""" )
# Intermediate
SCREAMING_SNAKE_CASE__ = layer.intermediate
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_intermediate_dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_intermediate_dense/bias""" )
# Output
SCREAMING_SNAKE_CASE__ = layer.output
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_dense/bias""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_layer_array(UpperCamelCase__ , """_output_layer_norm/beta""" )
# Embeddings
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_position_embedding_layer/embeddings""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_type_embedding_layer/embeddings""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_embedding_norm_layer/gamma""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_embedding_norm_layer/beta""" )
# LM Head
SCREAMING_SNAKE_CASE__ = model.cls.predictions.transform
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""dense/kernel""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""dense/bias""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""layer_norm/gamma""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""layer_norm/beta""" )
SCREAMING_SNAKE_CASE__ = get_masked_lm_array("""embedding_table""" )
# Pooling
SCREAMING_SNAKE_CASE__ = BertPooler(config=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_pooler_layer/kernel""" )
SCREAMING_SNAKE_CASE__ = get_encoder_array("""_pooler_layer/bias""" )
# Export final model
model.save_pretrained(UpperCamelCase__ )
# Integration test - should load without any errors ;)
SCREAMING_SNAKE_CASE__ = BertForMaskedLM.from_pretrained(UpperCamelCase__ )
print(new_model.eval() )
print("""Model conversion was done sucessfully!""" )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow Token Dropping checkpoint path.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model.',
)
_lowerCamelCase = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path) | 59 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : Union[str, Any] = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = "sew"
def __init__( self : str , __a : Optional[int]=32 , __a : int=768 , __a : Optional[int]=12 , __a : List[str]=12 , __a : str=3_072 , __a : Optional[int]=2 , __a : Dict="gelu" , __a : str=0.1 , __a : List[Any]=0.1 , __a : Dict=0.1 , __a : List[Any]=0.0 , __a : Optional[int]=0.1 , __a : Union[str, Any]=0.1 , __a : int=0.02 , __a : Dict=1e-5 , __a : str="group" , __a : Tuple="gelu" , __a : Optional[Any]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __a : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __a : str=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __a : Dict=False , __a : Dict=128 , __a : List[str]=16 , __a : Optional[int]=True , __a : Optional[Any]=0.05 , __a : int=10 , __a : Union[str, Any]=2 , __a : Optional[int]=0.0 , __a : List[str]=10 , __a : Any=0 , __a : Tuple="mean" , __a : Union[str, Any]=False , __a : Union[str, Any]=False , __a : List[Any]=256 , __a : str=0 , __a : Union[str, Any]=1 , __a : Optional[Any]=2 , **__a : Any , ) ->List[Any]:
super().__init__(**__a , pad_token_id=__a , bos_token_id=__a , eos_token_id=__a )
lowerCamelCase_ : str = hidden_size
lowerCamelCase_ : Dict = feat_extract_norm
lowerCamelCase_ : Tuple = feat_extract_activation
lowerCamelCase_ : Union[str, Any] = list(__a )
lowerCamelCase_ : str = list(__a )
lowerCamelCase_ : Dict = list(__a )
lowerCamelCase_ : Optional[int] = conv_bias
lowerCamelCase_ : Any = num_conv_pos_embeddings
lowerCamelCase_ : int = num_conv_pos_embedding_groups
lowerCamelCase_ : Optional[int] = len(self.conv_dim )
lowerCamelCase_ : Dict = num_hidden_layers
lowerCamelCase_ : Any = intermediate_size
lowerCamelCase_ : Optional[Any] = squeeze_factor
lowerCamelCase_ : List[str] = hidden_act
lowerCamelCase_ : str = num_attention_heads
lowerCamelCase_ : Optional[int] = hidden_dropout
lowerCamelCase_ : str = attention_dropout
lowerCamelCase_ : int = activation_dropout
lowerCamelCase_ : Optional[Any] = feat_proj_dropout
lowerCamelCase_ : Optional[int] = final_dropout
lowerCamelCase_ : str = layerdrop
lowerCamelCase_ : List[str] = layer_norm_eps
lowerCamelCase_ : Optional[Any] = initializer_range
lowerCamelCase_ : List[str] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowerCamelCase_ : Tuple = apply_spec_augment
lowerCamelCase_ : List[Any] = mask_time_prob
lowerCamelCase_ : str = mask_time_length
lowerCamelCase_ : List[str] = mask_time_min_masks
lowerCamelCase_ : str = mask_feature_prob
lowerCamelCase_ : Optional[int] = mask_feature_length
lowerCamelCase_ : Any = mask_feature_min_masks
# ctc loss
lowerCamelCase_ : Optional[Any] = ctc_loss_reduction
lowerCamelCase_ : str = ctc_zero_infinity
# sequence classification
lowerCamelCase_ : Any = use_weighted_layer_sum
lowerCamelCase_ : str = classifier_proj_size
@property
def _lowerCAmelCase ( self : Union[str, Any] ) ->Any:
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 278 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
snake_case__ : List[Any] = '\nimport os\n'
snake_case__ : List[str] = '\ndef foo():\n import os\n return False\n'
snake_case__ : List[Any] = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
snake_case__ : List[str] = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
snake_case__ : Optional[Any] = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
snake_case__ : Tuple = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
snake_case__ : str = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
snake_case__ : Optional[Any] = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
snake_case__ : Any = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
snake_case__ : Union[str, Any] = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
snake_case__ : Any = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , A__ )
def __lowerCamelCase ( A__ : Dict , A__ : int ) -> List[Any]:
lowerCamelCase_ : List[str] = os.path.join(A__ , """test_file.py""" )
with open(A__ , """w""" ) as _tmp_file:
_tmp_file.write(A__ )
lowerCamelCase_ : str = get_imports(A__ )
assert parsed_imports == ["os"]
| 278 | 1 |
"""simple docstring"""
import sys
def __a ( A ):
'''simple docstring'''
lowercase__ = len(A )
lowercase__ = [[0 for x in range(A )] for x in range(A )]
lowercase__ = [[0 for x in range(A )] for x in range(A )]
for chain_length in range(2 , A ):
for a in range(1 , n - chain_length + 1 ):
lowercase__ = a + chain_length - 1
lowercase__ = sys.maxsize
for c in range(A , A ):
lowercase__ = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
lowercase__ = cost
lowercase__ = c
return matrix, sol
def __a ( A , A , A ):
'''simple docstring'''
if i == j:
print("A" + str(A ) , end=" " )
else:
print("(" , end=" " )
print_optiomal_solution(A , A , optimal_solution[i][j] )
print_optiomal_solution(A , optimal_solution[i][j] + 1 , A )
print(")" , end=" " )
def __a ( ):
'''simple docstring'''
lowercase__ = [30, 35, 15, 5, 10, 20, 25]
lowercase__ = len(A )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
lowercase__ , lowercase__ = matrix_chain_order(A )
print("No. of Operation required: " + str(matrix[1][n - 1] ) )
print_optiomal_solution(A , 1 , n - 1 )
if __name__ == "__main__":
main()
| 668 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_: List[Any] = logging.get_logger(__name__)
lowerCAmelCase_: int = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class a__ ( _a ):
snake_case_ = "markuplm"
def __init__( self, _UpperCAmelCase=3_0522, _UpperCAmelCase=768, _UpperCAmelCase=12, _UpperCAmelCase=12, _UpperCAmelCase=3072, _UpperCAmelCase="gelu", _UpperCAmelCase=0.1, _UpperCAmelCase=0.1, _UpperCAmelCase=512, _UpperCAmelCase=2, _UpperCAmelCase=0.02, _UpperCAmelCase=1E-12, _UpperCAmelCase=0, _UpperCAmelCase=0, _UpperCAmelCase=2, _UpperCAmelCase=256, _UpperCAmelCase=1024, _UpperCAmelCase=216, _UpperCAmelCase=1001, _UpperCAmelCase=32, _UpperCAmelCase=50, _UpperCAmelCase="absolute", _UpperCAmelCase=True, _UpperCAmelCase=None, **_UpperCAmelCase, ):
'''simple docstring'''
super().__init__(
pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase, )
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = use_cache
lowercase__ = classifier_dropout
# additional properties
lowercase__ = max_depth
lowercase__ = max_xpath_tag_unit_embeddings
lowercase__ = max_xpath_subs_unit_embeddings
lowercase__ = tag_pad_id
lowercase__ = subs_pad_id
lowercase__ = xpath_unit_hidden_size
| 668 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCamelCase( __snake_case ):
'''simple docstring'''
__magic_name__ = (CMStochasticIterativeScheduler,)
__magic_name__ = 10
def lowerCAmelCase__ ( self , **snake_case_ ):
_A = {
'num_train_timesteps': 201,
'sigma_min': 0.002,
'sigma_max': 80.0,
}
config.update(**snake_case_ )
return config
def lowerCAmelCase__ ( self ):
_A = 10
_A = self.get_scheduler_config()
_A = self.scheduler_classes[0](**snake_case_ )
scheduler.set_timesteps(snake_case_ )
_A = scheduler.timesteps[0]
_A = scheduler.timesteps[1]
_A = self.dummy_sample
_A = 0.1 * sample
_A = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
_A = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def lowerCAmelCase__ ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def lowerCAmelCase__ ( self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**snake_case_ )
_A = 1
scheduler.set_timesteps(snake_case_ )
_A = scheduler.timesteps
_A = torch.manual_seed(0 )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case_ ):
# 1. scale model input
_A = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
_A = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
_A = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
_A = pred_prev_sample
_A = torch.sum(torch.abs(snake_case_ ) )
_A = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.2510 ) < 1E-3
def lowerCAmelCase__ ( self ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**snake_case_ )
_A = [106, 0]
scheduler.set_timesteps(timesteps=snake_case_ )
_A = scheduler.timesteps
_A = torch.manual_seed(0 )
_A = self.dummy_model()
_A = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_A = scheduler.scale_model_input(snake_case_ , snake_case_ )
# 2. predict noise residual
_A = model(snake_case_ , snake_case_ )
# 3. predict previous sample x_t-1
_A = scheduler.step(snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ ).prev_sample
_A = pred_prev_sample
_A = torch.sum(torch.abs(snake_case_ ) )
_A = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4527 ) < 1E-3
def lowerCAmelCase__ ( self ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**snake_case_ )
_A = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case_ , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**snake_case_ )
_A = [39, 30, 12, 1, 0]
_A = len(snake_case_ )
with self.assertRaises(snake_case_ , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=snake_case_ , timesteps=snake_case_ )
def lowerCAmelCase__ ( self ):
_A = self.scheduler_classes[0]
_A = self.get_scheduler_config()
_A = scheduler_class(**snake_case_ )
_A = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case_ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=snake_case_ )
| 27 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase__ ( self ):
torch.manual_seed(0 )
_A = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def lowerCAmelCase__ ( self ):
_A = self.dummy_uncond_unet
_A = KarrasVeScheduler()
_A = KarrasVePipeline(unet=snake_case_ , scheduler=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_A = torch.manual_seed(0 )
_A = pipe(num_inference_steps=2 , generator=snake_case_ , output_type='numpy' ).images
_A = torch.manual_seed(0 )
_A = pipe(num_inference_steps=2 , generator=snake_case_ , output_type='numpy' , return_dict=snake_case_ )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_A = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCamelCase( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self ):
_A = 'google/ncsnpp-celebahq-256'
_A = UNetaDModel.from_pretrained(snake_case_ )
_A = KarrasVeScheduler()
_A = KarrasVePipeline(unet=snake_case_ , scheduler=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
_A = torch.manual_seed(0 )
_A = pipe(num_inference_steps=20 , generator=snake_case_ , output_type='numpy' ).images
_A = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_A = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 27 | 1 |
"""simple docstring"""
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = data
lowerCAmelCase : Any = None
def __repr__( self ):
"""simple docstring"""
return f"""Node({self.data})"""
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = None
def __iter__( self ):
"""simple docstring"""
lowerCAmelCase : Any = self.head
while node:
yield node.data
lowerCAmelCase : Optional[int] = node.next
def __len__( self ):
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ):
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("list index out of range." )
lowerCAmelCase : Union[str, Any] = self.head
for _ in range(snake_case__ ):
lowerCAmelCase : int = current.next
lowerCAmelCase : List[str] = data
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("list index out of range" )
lowerCAmelCase : Optional[int] = Node(snake_case__ )
if self.head is None:
lowerCAmelCase : Any = new_node
elif index == 0:
lowerCAmelCase : Any = self.head # link new_node to head
lowerCAmelCase : Union[str, Any] = new_node
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : int = temp.next
lowerCAmelCase : int = temp.next
lowerCAmelCase : Dict = new_node
def lowercase__ ( self ): # print every node data
"""simple docstring"""
print(self )
def lowercase__ ( self ):
"""simple docstring"""
return self.delete_nth(0 )
def lowercase__ ( self ): # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def lowercase__ ( self , snake_case__ = 0 ):
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("List index out of range." )
lowerCAmelCase : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase : Optional[int] = self.head.next
else:
lowerCAmelCase : List[str] = self.head
for _ in range(index - 1 ):
lowerCAmelCase : Union[str, Any] = temp.next
lowerCAmelCase : Optional[Any] = temp.next
lowerCAmelCase : Any = temp.next.next
return delete_node.data
def lowercase__ ( self ):
"""simple docstring"""
return self.head is None
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = None
lowerCAmelCase : Optional[int] = self.head
while current:
# Store the current node's next node.
lowerCAmelCase : List[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase : Dict = prev
# Make the previous node be the current node
lowerCAmelCase : List[str] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase : int = next_node
# Return prev in order to put the head at the end
lowerCAmelCase : Tuple = prev
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(SCREAMING_SNAKE_CASE ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE , i + 1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(SCREAMING_SNAKE_CASE ) == 9
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase : Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE ) == "->".join(str(SCREAMING_SNAKE_CASE ) for i in range(-8 , 1 ) )
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : List[str] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
"dlrow olleH",
7,
5_5_5_5,
0,
-192.55_555,
"Hello, world!",
77.9,
Node(1_0 ),
None,
None,
12.20,
]
lowerCAmelCase : List[str] = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase : str = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase : List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("Hello again, world!" ) )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE )
assert (
str(SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def a__ ( ):
'''simple docstring'''
from doctest import testmod
testmod()
lowerCAmelCase : Optional[Any] = LinkedList()
linked_list.insert_head(input("Inserting 1st at head " ).strip() )
linked_list.insert_head(input("Inserting 2nd at head " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
linked_list.insert_tail(input("\nInserting 1st at tail " ).strip() )
linked_list.insert_tail(input("Inserting 2nd at tail " ).strip() )
print("\nPrint list:" )
linked_list.print_list()
print("\nDelete head" )
linked_list.delete_head()
print("Delete tail" )
linked_list.delete_tail()
print("\nPrint list:" )
linked_list.print_list()
print("\nReverse linked list" )
linked_list.reverse()
print("\nPrint list:" )
linked_list.print_list()
print("\nString representation of linked list:" )
print(SCREAMING_SNAKE_CASE )
print("\nReading/changing Node data using indexing:" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase : Any = input("Enter New Value: " ).strip()
print("New list:" )
print(SCREAMING_SNAKE_CASE )
print(f"""length of linked_list is : {len(SCREAMING_SNAKE_CASE )}""" )
if __name__ == "__main__":
main()
| 705 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="utf-8" , check=snake_case__ , )
assert hasattr(self , "env" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Optional[Any] = {
"enabled": True,
"processes_per_host": 8,
}
lowerCAmelCase : List[Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowerCAmelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCAmelCase : Optional[Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=snake_case__ , instance_type=self.instance_type , debugger_hook_config=snake_case__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=snake_case__ , py_version="py36" , )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
TrainingJobAnalytics(snake_case__ ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : Dict = self.create_estimator(snake_case__ )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase : Tuple = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCAmelCase : Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase : Dict = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , snake_case__ )
| 681 | 0 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
SCREAMING_SNAKE_CASE_ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
SCREAMING_SNAKE_CASE_ = typing.Union[np.floataa, int, float] # noqa: UP007
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return np.sqrt(np.sum((np.asarray(SCREAMING_SNAKE_CASE__ ) - np.asarray(SCREAMING_SNAKE_CASE__ )) ** 2 ) )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return sum((va - va) ** 2 for va, va in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ** (1 / 2)
if __name__ == "__main__":
def lowerCAmelCase__ ( ):
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10000 , globals=globals() , ) )
benchmark()
| 597 |
'''simple docstring'''
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE_ = "src/transformers"
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE_ = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE_ = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE_ = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE_ = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE_ = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE_ = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE_ = re.compile("^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE_ = re.compile("^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE_ = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
SCREAMING_SNAKE_CASE_ = re.compile(r"^\s*try:")
# Catches a line with else:
SCREAMING_SNAKE_CASE_ = re.compile(r"^\s*else:")
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
if _re_test_backend.search(SCREAMING_SNAKE_CASE__ ) is None:
return None
__a : Optional[Any] = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE__ )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
with open(SCREAMING_SNAKE_CASE__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
__a : Union[str, Any] = f.readlines()
__a : Any = 0
while line_index < len(SCREAMING_SNAKE_CASE__ ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE__ ):
return None
# First grab the objects without a specific backend in _import_structure
__a : List[Any] = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__a : Union[str, Any] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE__ ):
__a : Optional[int] = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE__ ).groups()[0]
__a : int = re.findall('\[([^\]]+)\]' , SCREAMING_SNAKE_CASE__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__a : Union[str, Any] = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
__a : Dict = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE__ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE__ )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__a : Optional[Any] = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__a : Tuple = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__a : Union[str, Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__a : int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__a : List[Any] = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE__ ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE__ ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE__ ) is not None:
__a : Optional[Any] = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE__ ).groups()[0].split(', ' )
__a : Any = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE__ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE__ )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE__ ) is not None:
__a : List[Any] = _re_between_brackets.search(SCREAMING_SNAKE_CASE__ ).groups()[0].split(', ' )
__a : Union[str, Any] = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE__ ) > 0]
objects.extend(SCREAMING_SNAKE_CASE__ )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE__ ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE__ ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
__a : List[Any] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__a : Any = []
while (
line_index < len(SCREAMING_SNAKE_CASE__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__a : Any = lines[line_index]
__a : Optional[int] = _re_import.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__a : List[Any] = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE__ ):
# If the line is an if is_backend_available, we grab all objects associated.
__a : Any = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__a : List[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__a : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__a : int = lines[line_index]
__a : List[Any] = _re_import.search(SCREAMING_SNAKE_CASE__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__a : Tuple = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def find_duplicates(SCREAMING_SNAKE_CASE__ ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__a : Any = []
for key in import_dict_objects.keys():
__a : List[str] = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
__a : int = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__a : List[Any] = 'base imports' if key == 'none' else f'''{key} backend'''
errors.append(f'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def lowerCAmelCase__ ( ):
__a : List[Any] = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE__ ):
if "__init__.py" in files:
__a : Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , '__init__.py' )
__a : Optional[Any] = parse_init(SCREAMING_SNAKE_CASE__ )
if objects is not None:
__a : str = analyze_results(*SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
__a : Optional[int] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('\n'.join(SCREAMING_SNAKE_CASE__ ) )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE__ ) )
def lowerCAmelCase__ ( ):
__a : Optional[Any] = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE__ ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(SCREAMING_SNAKE_CASE__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE__ ) / folder).glob('*.py' ) ) ) == 0:
continue
__a : Optional[Any] = str((Path(SCREAMING_SNAKE_CASE__ ) / folder).relative_to(SCREAMING_SNAKE_CASE__ ) )
__a : str = short_path.replace(os.path.sep , '.' )
submodules.append(SCREAMING_SNAKE_CASE__ )
for fname in files:
if fname == "__init__.py":
continue
__a : str = str((Path(SCREAMING_SNAKE_CASE__ ) / fname).relative_to(SCREAMING_SNAKE_CASE__ ) )
__a : str = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE__ )
return submodules
SCREAMING_SNAKE_CASE_ = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
]
def lowerCAmelCase__ ( ):
# This is to make sure the transformers module imported is the one in the repo.
__a : int = importlib.util.spec_from_file_location(
'transformers' , os.path.join(SCREAMING_SNAKE_CASE__ , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__a : Optional[Any] = spec.loader.load_module()
__a : Any = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
__a : Union[str, Any] = '\n'.join(f'''- {module}''' for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
f'''{list_of_modules}\n'''
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 597 | 1 |
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 'Tobias Carryer'
from time import time
class a__ :
def __init__( self :int , _lowerCamelCase :str , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Tuple , _lowerCamelCase :str=int(time() ) ): # noqa: B008
'''simple docstring'''
UpperCamelCase_ : str =multiplier
UpperCamelCase_ : str =increment
UpperCamelCase_ : str =modulo
UpperCamelCase_ : int =seed
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =(self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
__SCREAMING_SNAKE_CASE = LinearCongruentialGenerator(1_664_525, 1_013_904_223, 2 << 31)
while True:
print(lcg.next_number())
| 717 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class a__ ( A__ ):
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
UpperCamelCase_ : List[str] =self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , 'tf_padding' ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , 'depth_multiplier' ) )
class a__ :
def __init__( self :Tuple , _lowerCamelCase :int , _lowerCamelCase :Optional[Any]=13 , _lowerCamelCase :List[Any]=3 , _lowerCamelCase :Optional[Any]=32 , _lowerCamelCase :str=0.25 , _lowerCamelCase :str=8 , _lowerCamelCase :str=8 , _lowerCamelCase :Tuple=6 , _lowerCamelCase :Optional[Any]=32 , _lowerCamelCase :Union[str, Any]=True , _lowerCamelCase :int=True , _lowerCamelCase :Optional[int]=True , _lowerCamelCase :Tuple="relu6" , _lowerCamelCase :List[Any]=1_280 , _lowerCamelCase :Optional[int]=0.1 , _lowerCamelCase :Optional[Any]=0.02 , _lowerCamelCase :Dict=True , _lowerCamelCase :List[str]=True , _lowerCamelCase :List[str]=10 , _lowerCamelCase :List[Any]=None , ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =parent
UpperCamelCase_ : Optional[Any] =batch_size
UpperCamelCase_ : List[str] =num_channels
UpperCamelCase_ : Union[str, Any] =image_size
UpperCamelCase_ : Union[str, Any] =depth_multiplier
UpperCamelCase_ : Optional[Any] =depth_divisible_by
UpperCamelCase_ : Optional[Any] =min_depth
UpperCamelCase_ : List[Any] =expand_ratio
UpperCamelCase_ : Any =tf_padding
UpperCamelCase_ : List[str] =output_stride
UpperCamelCase_ : Tuple =first_layer_is_expansion
UpperCamelCase_ : Any =finegrained_output
UpperCamelCase_ : Dict =hidden_act
UpperCamelCase_ : int =last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
UpperCamelCase_ : Optional[int] =classifier_dropout_prob
UpperCamelCase_ : str =use_labels
UpperCamelCase_ : List[Any] =is_training
UpperCamelCase_ : Tuple =num_labels
UpperCamelCase_ : Optional[int] =initializer_range
UpperCamelCase_ : Union[str, Any] =scope
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
UpperCamelCase_ : str =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ : Dict =None
UpperCamelCase_ : Dict =None
if self.use_labels:
UpperCamelCase_ : List[str] =ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_ : List[Any] =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCamelCase_ : Any =self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self :List[str] , _lowerCamelCase :Optional[int] , _lowerCamelCase :str , _lowerCamelCase :Tuple , _lowerCamelCase :List[str] ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =MobileNetVaModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_ : List[Any] =model(_lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
def lowerCamelCase_ ( self :Dict , _lowerCamelCase :int , _lowerCamelCase :Optional[Any] , _lowerCamelCase :str , _lowerCamelCase :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : Tuple =self.num_labels
UpperCamelCase_ : List[str] =MobileNetVaForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_ : List[str] =model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self :Any , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :str , _lowerCamelCase :Dict ):
'''simple docstring'''
UpperCamelCase_ : Tuple =self.num_labels
UpperCamelCase_ : int =MobileNetVaForSemanticSegmentation(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
UpperCamelCase_ : Dict =model(_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
UpperCamelCase_ : int =model(_lowerCamelCase , labels=_lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
UpperCamelCase_ : str =self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ : int =config_and_inputs
UpperCamelCase_ : Dict ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class a__ ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase__ = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
UpperCamelCase_ : Dict =MobileNetVaModelTester(self )
UpperCamelCase_ : Any =MobileNetVaConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
pass
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Tuple =model_class(_lowerCamelCase )
UpperCamelCase_ : Dict =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ : Optional[int] =[*signature.parameters.keys()]
UpperCamelCase_ : List[str] =['pixel_values']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def lowerCamelCase_ ( self :List[str] ):
'''simple docstring'''
UpperCamelCase_ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
def check_hidden_states_output(_lowerCamelCase :List[Any] , _lowerCamelCase :List[Any] , _lowerCamelCase :List[Any] ):
UpperCamelCase_ : List[str] =model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
UpperCamelCase_ : str =model(**self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
UpperCamelCase_ : Optional[Any] =outputs.hidden_states
UpperCamelCase_ : List[str] =16
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
UpperCamelCase_ , UpperCamelCase_ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ : Dict =True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ : Dict =True
check_hidden_states_output(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
UpperCamelCase_ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
UpperCamelCase_ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowerCamelCase )
@slow
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ : List[str] =MobileNetVaModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def A_ ( ):
UpperCamelCase_ : Dict =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a__ ( unittest.TestCase ):
@cached_property
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
)
@slow
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
UpperCamelCase_ : Optional[int] =MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(_lowerCamelCase )
UpperCamelCase_ : List[Any] =self.default_image_processor
UpperCamelCase_ : List[Any] =prepare_img()
UpperCamelCase_ : List[str] =image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ : List[Any] =model(**_lowerCamelCase )
# verify the logits
UpperCamelCase_ : Optional[int] =torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape , _lowerCamelCase )
UpperCamelCase_ : Optional[Any] =torch.tensor([0.2445, -1.1993, 0.1905] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCamelCase , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self :Tuple ):
'''simple docstring'''
UpperCamelCase_ : Dict =MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
UpperCamelCase_ : Optional[int] =model.to(_lowerCamelCase )
UpperCamelCase_ : Dict =MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
UpperCamelCase_ : Any =prepare_img()
UpperCamelCase_ : List[Any] =image_processor(images=_lowerCamelCase , return_tensors='pt' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
UpperCamelCase_ : Any =model(**_lowerCamelCase )
UpperCamelCase_ : Any =outputs.logits
# verify the logits
UpperCamelCase_ : Optional[int] =torch.Size((1, 21, 65, 65) )
self.assertEqual(logits.shape , _lowerCamelCase )
UpperCamelCase_ : List[Any] =torch.tensor(
[
[[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
[[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
[[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
] , device=_lowerCamelCase , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowerCamelCase , atol=1E-4 ) )
| 395 | 0 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase__ : Union[str, Any] = '▁'
lowerCamelCase__ : Any = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
lowerCamelCase__ : Any = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
lowerCamelCase__ : Union[str, Any] = {
'facebook/s2t-small-librispeech-asr': 1_024,
}
lowerCamelCase__ : Dict = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
lowerCamelCase__ : List[Any] = {'mustc': MUSTC_LANGS}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = MAX_MODEL_INPUT_SIZES
lowercase_ = ["input_ids", "attention_mask"]
lowercase_ = []
def __init__( self : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : List[str]="<s>" , _lowerCAmelCase : Tuple="</s>" , _lowerCAmelCase : str="<pad>" , _lowerCAmelCase : List[Any]="<unk>" , _lowerCAmelCase : int=False , _lowerCAmelCase : int=False , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Union[str, Any]=None , _lowerCAmelCase : Optional[Dict[str, Any]] = None , **_lowerCAmelCase : str , ):
SCREAMING_SNAKE_CASE_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , unk_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , do_upper_case=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , tgt_lang=_lowerCAmelCase , lang_codes=_lowerCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_upper_case
SCREAMING_SNAKE_CASE_ = do_lower_case
SCREAMING_SNAKE_CASE_ = load_json(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ = spm_file
SCREAMING_SNAKE_CASE_ = load_spm(_lowerCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
SCREAMING_SNAKE_CASE_ = lang_codes
SCREAMING_SNAKE_CASE_ = LANGUAGES[lang_codes]
SCREAMING_SNAKE_CASE_ = [F"<lang:{lang}>" for lang in self.langs]
SCREAMING_SNAKE_CASE_ = {lang: self.sp_model.PieceToId(F"<lang:{lang}>" ) for lang in self.langs}
SCREAMING_SNAKE_CASE_ = self.lang_tokens
SCREAMING_SNAKE_CASE_ = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
SCREAMING_SNAKE_CASE_ = {}
@property
def lowerCAmelCase_ ( self : Optional[Any] ):
return len(self.encoder )
@property
def lowerCAmelCase_ ( self : Dict ):
return self._tgt_lang
@tgt_lang.setter
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ = new_tgt_lang
self.set_tgt_lang_special_tokens(_lowerCAmelCase )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = self.lang_code_to_id[tgt_lang]
SCREAMING_SNAKE_CASE_ = [lang_code_id]
def lowerCAmelCase_ ( self : List[str] , _lowerCAmelCase : str ):
return self.sp_model.encode(_lowerCAmelCase , out_type=_lowerCAmelCase )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : Optional[Any] ):
return self.encoder.get(_lowerCAmelCase , self.encoder[self.unk_token] )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : int ):
return self.decoder.get(_lowerCAmelCase , self.unk_token )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
SCREAMING_SNAKE_CASE_ = self.sp_model.decode(_lowerCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
SCREAMING_SNAKE_CASE_ = []
else:
current_sub_tokens.append(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.sp_model.decode(_lowerCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : str=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : List[int] , _lowerCAmelCase : Optional[List[int]] = None , _lowerCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCAmelCase , token_ids_a=_lowerCAmelCase , already_has_special_tokens=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [1] * len(self.prefix_tokens )
SCREAMING_SNAKE_CASE_ = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCAmelCase )) + ([0] * len(_lowerCAmelCase )) + suffix_ones
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.__dict__.copy()
SCREAMING_SNAKE_CASE_ = None
return state
def __setstate__( self : int , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = load_spm(self.spm_file , self.sp_model_kwargs )
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
SCREAMING_SNAKE_CASE_ = Path(_lowerCAmelCase )
assert save_dir.is_dir(), F"{save_directory} should be a directory"
SCREAMING_SNAKE_CASE_ = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
SCREAMING_SNAKE_CASE_ = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _lowerCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_lowerCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _lowerCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_lowerCAmelCase , 'wb' ) as fi:
SCREAMING_SNAKE_CASE_ = self.sp_model.serialized_model_proto()
fi.write(_lowerCAmelCase )
return (str(_lowerCAmelCase ), str(_lowerCAmelCase ))
def UpperCAmelCase_ ( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
SCREAMING_SNAKE_CASE_ = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase_ ( __UpperCAmelCase : str ) -> Union[Dict, List]:
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str ) -> None:
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 ) | 31 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : Any , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : Any=3 , _lowerCAmelCase : Tuple=64 , _lowerCAmelCase : List[str]=None ):
SCREAMING_SNAKE_CASE_ = np.random.default_rng(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = length
SCREAMING_SNAKE_CASE_ = rng.normal(size=(length,) ).astype(np.floataa )
SCREAMING_SNAKE_CASE_ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Optional[int] ):
return self.length
def __getitem__( self : str , _lowerCAmelCase : Union[str, Any] ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCAmelCase : Dict=0 , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : str=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Dict , _lowerCAmelCase : Union[str, Any]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a[0] + self.b[0]
class lowerCamelCase_ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Optional[Any]=False ):
super().__init__()
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = torch.nn.Parameter(torch.tensor(_lowerCAmelCase ).float() )
SCREAMING_SNAKE_CASE_ = True
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : Optional[int]=None ):
if self.first_batch:
print(F"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}" )
SCREAMING_SNAKE_CASE_ = False
return x * self.a + self.b
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : int = 16 ) -> Union[str, Any]:
from datasets import load_dataset
from transformers import AutoTokenizer
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained('bert-base-cased' )
SCREAMING_SNAKE_CASE_ = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
SCREAMING_SNAKE_CASE_ = load_dataset('csv' , data_files=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = datasets['train'].unique('label' )
SCREAMING_SNAKE_CASE_ = {v: i for i, v in enumerate(__UpperCAmelCase )}
def tokenize_function(__UpperCAmelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding='max_length' )
if "label" in examples:
SCREAMING_SNAKE_CASE_ = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_ = datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(__UpperCAmelCase : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__UpperCAmelCase , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(__UpperCAmelCase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['train'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=2 )
SCREAMING_SNAKE_CASE_ = DataLoader(tokenized_datasets['validation'] , shuffle=__UpperCAmelCase , collate_fn=__UpperCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader | 31 | 1 |
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 713 |
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
A__ : Optional[int] = False
class snake_case__ ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def A_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : List[str] = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__snake_case : str = torch.manual_seed(0 )
__snake_case : int = pipe.dual_guided(
prompt='first prompt' , image=__a , text_to_image_strength=0.7_5 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__a )
__snake_case : Union[str, Any] = VersatileDiffusionPipeline.from_pretrained(__a , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : List[str] = generator.manual_seed(0 )
__snake_case : Tuple = pipe.dual_guided(
prompt='first prompt' , image=__a , text_to_image_strength=0.7_5 , generator=__a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def A_ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
__snake_case : str = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__snake_case : Optional[Any] = 'cyberpunk 2077'
__snake_case : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
__snake_case : List[str] = torch.manual_seed(0 )
__snake_case : Tuple = pipe.dual_guided(
prompt=__a , image=__a , text_to_image_strength=0.7_5 , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
__snake_case : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case : List[str] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
__snake_case : Tuple = 'A painting of a squirrel eating a burger '
__snake_case : str = torch.manual_seed(0 )
__snake_case : Optional[Any] = pipe.text_to_image(
prompt=__a , generator=__a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
__snake_case : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case : Optional[int] = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
__snake_case : int = pipe.image_variation(__a , generator=__a , output_type='numpy' ).images
__snake_case : Any = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__snake_case : str = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 124 | 0 |
"""simple docstring"""
__A = 9.8_0665
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = g ) ->float:
"""simple docstring"""
if fluid_density <= 0:
raise ValueError('Impossible fluid density' )
if volume < 0:
raise ValueError('Impossible Object volume' )
if gravity <= 0:
raise ValueError('Impossible Gravity' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 93 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 ):
'''simple docstring'''
lowerCamelCase : Dict = None
if token is not None:
lowerCamelCase : Union[str, Any] = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowerCamelCase : Optional[int] = "636036"
lowerCamelCase : List[Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowerCamelCase : Union[str, Any] = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ ).json()
return result["workflow_runs"]
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : int = get_daily_ci_runs(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Tuple = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowerCamelCase : List[Any] = workflow_run["id"]
break
return workflow_run_id
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[int] = get_last_daily_ci_runs(SCREAMING_SNAKE_CASE_ )
if workflow_run_id is not None:
lowerCamelCase : Tuple = get_artifacts_links(worflow_run_id=SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowerCamelCase : List[Any] = artifacts_links[artifact_name]
download_artifact(
artifact_name=SCREAMING_SNAKE_CASE_ , artifact_url=SCREAMING_SNAKE_CASE_ , output_dir=SCREAMING_SNAKE_CASE_ , token=SCREAMING_SNAKE_CASE_ )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
get_last_daily_ci_artifacts(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[str] = {}
for artifact_name in artifact_names:
lowerCamelCase : Tuple = os.path.join(SCREAMING_SNAKE_CASE_ , f"""{artifact_name}.zip""" )
if os.path.isfile(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : int = {}
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
# read the file
with z.open(SCREAMING_SNAKE_CASE_ ) as f:
lowerCamelCase : int = f.read().decode("UTF-8" )
return results
| 340 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
'''configuration_nllb_moe''': [
'''NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''NllbMoeConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = [
'''NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NllbMoeForConditionalGeneration''',
'''NllbMoeModel''',
'''NllbMoePreTrainedModel''',
'''NllbMoeTop2Router''',
'''NllbMoeSparseMLP''',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 137 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_SCREAMING_SNAKE_CASE : List[str] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
_SCREAMING_SNAKE_CASE : int = {
# fairseq:
'''wmt19-ru-en''': {'''length_penalty''': 1.1},
'''wmt19-en-ru''': {'''length_penalty''': 1.15},
'''wmt19-en-de''': {'''length_penalty''': 1.0},
'''wmt19-de-en''': {'''length_penalty''': 1.1},
# allenai:
'''wmt16-en-de-dist-12-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-dist-6-1''': {'''length_penalty''': 0.6},
'''wmt16-en-de-12-1''': {'''length_penalty''': 0.8},
'''wmt19-de-en-6-6-base''': {'''length_penalty''': 0.6},
'''wmt19-de-en-6-6-big''': {'''length_penalty''': 0.6},
}
# this remaps the different models to their organization names
_SCREAMING_SNAKE_CASE : List[str] = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_SCREAMING_SNAKE_CASE : Optional[int] = '''facebook'''
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
_SCREAMING_SNAKE_CASE : Optional[Any] = '''allenai'''
def lowerCamelCase__ ( _lowerCamelCase : int ) -> Any:
# (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
# e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
lowerCamelCase_ = dict((re.sub(r'@@$' , '' , _lowerCamelCase ), v) if k.endswith('@@' ) else (re.sub(r'$' , '</w>' , _lowerCamelCase ), v) for k, v in d.items() )
lowerCamelCase_ = '<s> <pad> </s> <unk>'.split()
# restore the special tokens
for k in keep_keys:
del da[F'''{k}</w>''']
lowerCamelCase_ = d[k] # restore
return da
def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int ) -> Optional[Any]:
# prep
assert os.path.exists(_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
print(F'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
lowerCamelCase_ = basename(_lowerCamelCase )
lowerCamelCase_ = dirname(_lowerCamelCase )
lowerCamelCase_ = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCamelCase_ = cls.hub_models()
lowerCamelCase_ = {'bpe': 'fastbpe', 'tokenizer': 'moses'}
lowerCamelCase_ = '.'
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'''using checkpoint {checkpoint_file}''' )
lowerCamelCase_ = hub_utils.from_pretrained(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , archive_map=_lowerCamelCase , **_lowerCamelCase )
lowerCamelCase_ = vars(chkpt['args']['model'] )
lowerCamelCase_ = args['source_lang']
lowerCamelCase_ = args['target_lang']
lowerCamelCase_ = dirname(_lowerCamelCase )
lowerCamelCase_ = basename(_lowerCamelCase )
# dicts
lowerCamelCase_ = os.path.join(_lowerCamelCase , F'''dict.{src_lang}.txt''' )
lowerCamelCase_ = os.path.join(_lowerCamelCase , F'''dict.{tgt_lang}.txt''' )
lowerCamelCase_ = Dictionary.load(_lowerCamelCase )
lowerCamelCase_ = rewrite_dict_keys(src_dict.indices )
lowerCamelCase_ = len(_lowerCamelCase )
lowerCamelCase_ = os.path.join(_lowerCamelCase , 'vocab-src.json' )
print(F'''Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCamelCase_ = True
for k in src_vocab.keys():
if not k.islower():
lowerCamelCase_ = False
break
lowerCamelCase_ = Dictionary.load(_lowerCamelCase )
lowerCamelCase_ = rewrite_dict_keys(tgt_dict.indices )
lowerCamelCase_ = len(_lowerCamelCase )
lowerCamelCase_ = os.path.join(_lowerCamelCase , 'vocab-tgt.json' )
print(F'''Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# merges_file (bpecodes)
lowerCamelCase_ = os.path.join(_lowerCamelCase , VOCAB_FILES_NAMES['merges_file'] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCamelCase_ = os.path.join(_lowerCamelCase , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
break
with open(_lowerCamelCase , encoding='utf-8' ) as fin:
lowerCamelCase_ = fin.read()
lowerCamelCase_ = re.sub(r' \d+$' , '' , _lowerCamelCase , 0 , re.M ) # remove frequency number
print(F'''Generating {merges_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fout:
fout.write(_lowerCamelCase )
# model config
lowerCamelCase_ = os.path.join(_lowerCamelCase , 'config.json' )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'''need to extend tokenizer to support bpe={args["bpe"]}'''
assert args["tokenizer"] == "moses", F'''need to extend tokenizer to support bpe={args["tokenizer"]}'''
lowerCamelCase_ = {
'architectures': ['FSMTForConditionalGeneration'],
'model_type': 'fsmt',
'activation_dropout': args['activation_dropout'],
'activation_function': 'relu',
'attention_dropout': args['attention_dropout'],
'd_model': args['decoder_embed_dim'],
'dropout': args['dropout'],
'init_std': 0.02,
'max_position_embeddings': args['max_source_positions'],
'num_hidden_layers': args['encoder_layers'],
'src_vocab_size': src_vocab_size,
'tgt_vocab_size': tgt_vocab_size,
'langs': [src_lang, tgt_lang],
'encoder_attention_heads': args['encoder_attention_heads'],
'encoder_ffn_dim': args['encoder_ffn_embed_dim'],
'encoder_layerdrop': args['encoder_layerdrop'],
'encoder_layers': args['encoder_layers'],
'decoder_attention_heads': args['decoder_attention_heads'],
'decoder_ffn_dim': args['decoder_ffn_embed_dim'],
'decoder_layerdrop': args['decoder_layerdrop'],
'decoder_layers': args['decoder_layers'],
'bos_token_id': 0,
'pad_token_id': 1,
'eos_token_id': 2,
'is_encoder_decoder': True,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_all_embeddings'],
}
# good hparam defaults to start with
lowerCamelCase_ = 5
lowerCamelCase_ = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCamelCase_ = best_score_hparams[model_dir]['length_penalty']
else:
lowerCamelCase_ = 1.0
print(F'''Generating {fsmt_model_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# tokenizer config
lowerCamelCase_ = os.path.join(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase_ = {
'langs': [src_lang, tgt_lang],
'model_max_length': 1024,
'do_lower_case': do_lower_case,
}
print(F'''Generating {fsmt_tokenizer_config_file}''' )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(_lowerCamelCase , ensure_ascii=_lowerCamelCase , indent=_lowerCamelCase ) )
# model
lowerCamelCase_ = chkpt['models'][0]
lowerCamelCase_ = model.state_dict()
# rename keys to start with 'model.'
lowerCamelCase_ = OrderedDict(('model.' + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCamelCase_ = [
'model.model',
'model.encoder.version',
'model.decoder.version',
'model.encoder_embed_tokens.weight',
'model.decoder_embed_tokens.weight',
'model.encoder.embed_positions._float_tensor',
'model.decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
model_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
lowerCamelCase_ = FSMTConfig.from_pretrained(_lowerCamelCase )
lowerCamelCase_ = FSMTForConditionalGeneration(_lowerCamelCase )
# check that it loads ok
model_new.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
# save
lowerCamelCase_ = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(F'''Generating {pytorch_weights_dump_path}''' )
torch.save(_lowerCamelCase , _lowerCamelCase )
print('Conversion is done!' )
print('\nLast step is to upload the files to s3' )
print(F'''cd {data_root}''' )
print(F'''transformers-cli upload {model_dir}''' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fsmt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 137 | 1 |
from __future__ import annotations
import math
def lowercase_ ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : bool , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if not scores:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , minimax(depth + 1 , node_index * 2 + 1 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , )
)
def lowercase_ ( ):
"""simple docstring"""
snake_case__ : Any =[90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
snake_case__ : List[Any] =math.log(len(SCREAMING_SNAKE_CASE ) , 2 )
print(F'''Optimal value : {minimax(0 , 0 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 381 |
import math
def lowercase_ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
return math.pow(SCREAMING_SNAKE_CASE , 2 ) - a
def lowercase_ ( SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
return 2 * x
def lowercase_ ( SCREAMING_SNAKE_CASE : float ):
"""simple docstring"""
snake_case__ : Dict =2.0
while start <= a:
snake_case__ : List[Any] =math.pow(SCREAMING_SNAKE_CASE , 2 )
return start
def lowercase_ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : int = 99_99 , SCREAMING_SNAKE_CASE : float = 0.00_0000_0000_0001 ):
"""simple docstring"""
if a < 0:
raise ValueError('''math domain error''' )
snake_case__ : List[str] =get_initial_point(SCREAMING_SNAKE_CASE )
for _ in range(SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] =value
snake_case__ : Any =value - fx(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) / fx_derivative(SCREAMING_SNAKE_CASE )
if abs(prev_value - value ) < tolerance:
return value
return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 381 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = "layoutlmv3"
def __init__( self , _UpperCAmelCase=5_02_65 , _UpperCAmelCase=7_68 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=30_72 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=5_12 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-5 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase=10_24 , _UpperCAmelCase=1_28 , _UpperCAmelCase=1_28 , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=1_28 , _UpperCAmelCase=64 , _UpperCAmelCase=2_56 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=2_24 , _UpperCAmelCase=3 , _UpperCAmelCase=16 , _UpperCAmelCase=None , **_UpperCAmelCase , ):
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
snake_case_ = max_ad_position_embeddings
snake_case_ = coordinate_size
snake_case_ = shape_size
snake_case_ = has_relative_attention_bias
snake_case_ = rel_pos_bins
snake_case_ = max_rel_pos
snake_case_ = has_spatial_attention_bias
snake_case_ = rel_ad_pos_bins
snake_case_ = max_rel_ad_pos
snake_case_ = text_embed
snake_case_ = visual_embed
snake_case_ = input_size
snake_case_ = num_channels
snake_case_ = patch_size
snake_case_ = classifier_dropout
class lowerCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
__snake_case = version.parse("1.12" )
@property
def UpperCamelCase__ ( self ):
# The order of inputs is different for question answering and sequence classification
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
else:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''bbox''', {0: '''batch''', 1: '''sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels'''}),
] )
@property
def UpperCamelCase__ ( self ):
return 1E-5
@property
def UpperCamelCase__ ( self ):
return 12
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = 3 , _UpperCAmelCase = 40 , _UpperCAmelCase = 40 , ):
setattr(processor.image_processor , '''apply_ocr''' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case_ = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
snake_case_ = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
snake_case_ = [[''' '''.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
snake_case_ = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
snake_case_ = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs | 531 |
def __lowerCAmelCase (SCREAMING_SNAKE_CASE = 6008_5147_5143 )-> int:
"""simple docstring"""
try:
snake_case_ = int(SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
snake_case_ = 1
snake_case_ = 2
while i * i <= n:
while n % i == 0:
snake_case_ = i
n //= i
i += 1
if n > 1:
snake_case_ = n
return int(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(f'''{solution() = }''') | 531 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
_lowerCAmelCase :Dict = logging.get_logger(__name__)
_lowerCAmelCase :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCAmelCase :Optional[Any] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
_lowerCAmelCase :Optional[int] = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
_lowerCAmelCase :List[Any] = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
_lowerCAmelCase :Union[str, Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
_lowerCAmelCase :Any = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
_lowerCAmelCase :Optional[int] = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
_lowerCAmelCase :Optional[Any] = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
_lowerCAmelCase :List[str] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
_lowerCAmelCase :int = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a__ =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
a__ =DPRContextEncoderTokenizer
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a__ =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a__ =DPRQuestionEncoderTokenizer
_lowerCAmelCase :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
_lowerCAmelCase :int = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
_lowerCAmelCase :Tuple = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(a )
class _UpperCAmelCase :
'''simple docstring'''
def __call__( self , A , A = None , A = None , A = False , A = False , A = None , A = None , A = None , **A , ) -> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , )
elif titles is None or texts is None:
_UpperCAmelCase : List[str] = titles if texts is None else texts
return super().__call__(
A , A , padding=A , truncation=A , max_length=A , return_tensors=A , return_attention_mask=A , **A , )
_UpperCAmelCase : Optional[int] = titles if not isinstance(A , A ) else [titles]
_UpperCAmelCase : Any = texts if not isinstance(A , A ) else [texts]
_UpperCAmelCase : Any = len(A )
_UpperCAmelCase : List[str] = questions if not isinstance(A , A ) else [questions] * n_passages
assert len(A ) == len(
A ), f'There should be as many titles than texts but got {len(A )} titles and {len(A )} texts.'
_UpperCAmelCase : Any = super().__call__(A , A , padding=A , truncation=A )['''input_ids''']
_UpperCAmelCase : Dict = super().__call__(A , add_special_tokens=A , padding=A , truncation=A )['''input_ids''']
_UpperCAmelCase : List[str] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(A , A )
]
}
if return_attention_mask is not False:
_UpperCAmelCase : List[Any] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
_UpperCAmelCase : Optional[Any] = attention_mask
return self.pad(A , padding=A , max_length=A , return_tensors=A )
def __lowerCAmelCase ( self , A , A , A = 1_6 , A = 6_4 , A = 4 , ) -> List[DPRSpanPrediction]:
_UpperCAmelCase : List[Any] = reader_input['''input_ids''']
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = reader_output[:3]
_UpperCAmelCase : Optional[int] = len(A )
_UpperCAmelCase : List[str] = sorted(range(A ) , reverse=A , key=relevance_logits.__getitem__ )
_UpperCAmelCase : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
_UpperCAmelCase : Union[str, Any] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
_UpperCAmelCase : Optional[int] = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
_UpperCAmelCase : Optional[int] = sequence_ids.index(self.pad_token_id )
else:
_UpperCAmelCase : Any = len(A )
_UpperCAmelCase : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=A , top_spans=A , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=A , start_index=A , end_index=A , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(A ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def __lowerCAmelCase ( self , A , A , A , A , ) -> List[DPRSpanPrediction]:
_UpperCAmelCase : str = []
for start_index, start_score in enumerate(A ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
_UpperCAmelCase : List[str] = sorted(A , key=lambda A : x[1] , reverse=A )
_UpperCAmelCase : List[Any] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
_UpperCAmelCase : List[str] = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(A ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(a )
class _UpperCAmelCase ( a ,a ):
'''simple docstring'''
a__ =VOCAB_FILES_NAMES
a__ =READER_PRETRAINED_VOCAB_FILES_MAP
a__ =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ =READER_PRETRAINED_INIT_CONFIGURATION
a__ =['''input_ids''', '''attention_mask''']
a__ =DPRReaderTokenizer
| 506 |
"""simple docstring"""
from __future__ import annotations
import math
def lowerCamelCase_ (UpperCamelCase__ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(UpperCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
_lowerCAmelCase :Optional[int] = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def lowerCamelCase_ (UpperCamelCase__ : int ):
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise ValueError('''n must be an integer''' )
if n <= 0:
raise ValueError('''n must be >= 0''' )
_UpperCAmelCase : Dict = []
for num in range(len(UpperCamelCase__ ) ):
_UpperCAmelCase : Optional[int] = 0
while 2 * i * i <= odd_composites[num]:
_UpperCAmelCase : Any = odd_composites[num] - 2 * i * i
if is_prime(UpperCamelCase__ ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(UpperCamelCase__ ) == n:
return list_nums
return []
def lowerCamelCase_ ():
return compute_nums(1 )[0]
if __name__ == "__main__":
print(f"{solution() = }")
| 506 | 1 |
'''simple docstring'''
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
_lowerCamelCase , _lowerCamelCase = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
_lowerCamelCase = rh.cluster(
name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
)
else:
_lowerCamelCase = rh.cluster(
name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
_lowerCamelCase = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True) | 718 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase__ , unittest.TestCase ):
lowerCamelCase_ = OpenAIGPTTokenizer
lowerCamelCase_ = OpenAIGPTTokenizerFast
lowerCamelCase_ = True
lowerCamelCase_ = False
def _snake_case ( self :Optional[Any] ) -> Dict:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE__ = dict(zip(__A , range(len(__A ) ) ) )
SCREAMING_SNAKE_CASE__ = ["""#version: 0.2""", """l o""", """lo w""", """e r</w>""", """"""]
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__A ) )
def _snake_case ( self :Union[str, Any] , __A :str ) -> List[Any]:
"""simple docstring"""
return "lower newer", "lower newer"
def _snake_case ( self :Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
SCREAMING_SNAKE_CASE__ = """lower"""
SCREAMING_SNAKE_CASE__ = ["""low""", """er</w>"""]
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = tokens + ["""<unk>"""]
SCREAMING_SNAKE_CASE__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
def _snake_case ( self :Optional[Any] , __A :Optional[Any]=15 ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE__ = self.rust_tokenizer_class.from_pretrained(__A , **__A )
# Simple input
SCREAMING_SNAKE_CASE__ = """This is a simple input"""
SCREAMING_SNAKE_CASE__ = ["""This is a simple input 1""", """This is a simple input 2"""]
SCREAMING_SNAKE_CASE__ = ("""This is a simple input""", """This is a pair""")
SCREAMING_SNAKE_CASE__ = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding="""max_length""" )
# Simple input
self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding="""max_length""" )
# Simple input
self.assertRaises(
__A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding="""max_length""" , )
# Pair input
self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding="""max_length""" )
# Pair input
self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding="""max_length""" )
# Pair input
self.assertRaises(
__A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding="""max_length""" , )
def _snake_case ( self :Dict ) -> List[Any]:
"""simple docstring"""
pass
@require_ftfy
@require_spacy
@require_tokenizers
class UpperCamelCase_ ( UpperCamelCase__ ):
pass | 59 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
_snake_case : Optional[int] = (3, 9, -11, 0, 7, 5, 1, -1)
_snake_case : Optional[Any] = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class __SCREAMING_SNAKE_CASE :
SCREAMING_SNAKE_CASE__ =42
SCREAMING_SNAKE_CASE__ =42
class __SCREAMING_SNAKE_CASE :
def __init__( self, _a ) -> List[str]:
__SCREAMING_SNAKE_CASE = None
for i in sorted(_a, reverse=_a ):
__SCREAMING_SNAKE_CASE = Node(_a, self.head )
def __iter__( self ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = self.head
while node:
yield node.data
__SCREAMING_SNAKE_CASE = node.next_node
def __len__( self ) -> Any:
return sum(1 for _ in self )
def __str__( self ) -> Union[str, Any]:
return " -> ".join([str(_a ) for node in self] )
def _A ( __snake_case :SortedLinkedList , __snake_case :SortedLinkedList ) -> Optional[int]:
"""simple docstring"""
return SortedLinkedList(list(SCREAMING_SNAKE_CASE_ ) + list(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case : Dict = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 693 |
import math
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
UpperCamelCase = range(3 , int(math.sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _lowercase ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict=1 , **SCREAMING_SNAKE_CASE_ : List[Any] ):
"""simple docstring"""
UpperCamelCase = factor * value
UpperCamelCase = value
while not is_prime(SCREAMING_SNAKE_CASE_ ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **SCREAMING_SNAKE_CASE_ )
return value
| 386 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def _UpperCAmelCase (UpperCamelCase_ : str , UpperCamelCase_ : float | Decimal , UpperCamelCase_ : float = 10**-10 ):
'''simple docstring'''
_lowerCAmelCase : Tuple = a
while True:
_lowerCAmelCase : Any = Decimal(UpperCamelCase_ ) - (
Decimal(eval(UpperCamelCase_ ) ) / Decimal(eval(str(diff(UpperCamelCase_ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(UpperCamelCase_ ) ) < precision: # noqa: S307
return float(UpperCamelCase_ )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''')
# Find Square Root of 5
print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''')
# Exponential Roots
print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
| 196 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_lowerCamelCase : Any = 1_6
_lowerCamelCase : Tuple = 3_2
def _UpperCAmelCase (UpperCamelCase_ : Accelerator , UpperCamelCase_ : DatasetDict , UpperCamelCase_ : List[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : int = 16 ):
'''simple docstring'''
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_lowerCAmelCase : int = DatasetDict(
{
"""train""": dataset["""train"""].select(UpperCamelCase_ ),
"""validation""": dataset["""train"""].select(UpperCamelCase_ ),
"""test""": dataset["""validation"""],
} )
def tokenize_function(UpperCamelCase_ : str ):
# max_length=None => use the model max length (it's actually the default)
_lowerCAmelCase : List[Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_lowerCAmelCase : List[str] = datasets.map(
UpperCamelCase_ , batched=UpperCamelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_lowerCAmelCase : Optional[Any] = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(UpperCamelCase_ : Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_lowerCAmelCase : List[str] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_lowerCAmelCase : Optional[Any] = 16
elif accelerator.mixed_precision != "no":
_lowerCAmelCase : int = 8
else:
_lowerCAmelCase : Dict = None
return tokenizer.pad(
UpperCamelCase_ , padding="""longest""" , max_length=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_tensors="""pt""" , )
# Instantiate dataloaders.
_lowerCAmelCase : Optional[Any] = DataLoader(
tokenized_datasets["""train"""] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
_lowerCAmelCase : Tuple = DataLoader(
tokenized_datasets["""validation"""] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
_lowerCAmelCase : List[str] = DataLoader(
tokenized_datasets["""test"""] , shuffle=UpperCamelCase_ , collate_fn=UpperCamelCase_ , batch_size=UpperCamelCase_ )
return train_dataloader, eval_dataloader, test_dataloader
def _UpperCAmelCase (UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] ):
'''simple docstring'''
# New Code #
_lowerCAmelCase : Dict = []
# Download the dataset
_lowerCAmelCase : Optional[int] = load_dataset("""glue""" , """mrpc""" )
# Create our splits
_lowerCAmelCase : Any = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
_lowerCAmelCase : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_lowerCAmelCase : List[str] = config["""lr"""]
_lowerCAmelCase : Any = int(config["""num_epochs"""] )
_lowerCAmelCase : Dict = int(config["""seed"""] )
_lowerCAmelCase : Union[str, Any] = int(config["""batch_size"""] )
_lowerCAmelCase : Optional[Any] = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
_lowerCAmelCase : Optional[Any] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
_lowerCAmelCase : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
_lowerCAmelCase : int = MAX_GPU_BATCH_SIZE
set_seed(UpperCamelCase_ )
# New Code #
# Create our folds:
_lowerCAmelCase : Optional[int] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] )
_lowerCAmelCase : Any = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(UpperCamelCase_ ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = get_fold_dataloaders(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_lowerCAmelCase : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_lowerCAmelCase : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
_lowerCAmelCase : Any = AdamW(params=model.parameters() , lr=UpperCamelCase_ )
# Instantiate scheduler
_lowerCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=UpperCamelCase_ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Now we train the model
for epoch in range(UpperCamelCase_ ):
model.train()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_lowerCAmelCase : Tuple = model(**UpperCamelCase_ )
_lowerCAmelCase : Optional[int] = outputs.loss
_lowerCAmelCase : Optional[int] = loss / gradient_accumulation_steps
accelerator.backward(UpperCamelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : List[Any] = model(**UpperCamelCase_ )
_lowerCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
_lowerCAmelCase , _lowerCAmelCase : Any = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=UpperCamelCase_ , references=UpperCamelCase_ , )
_lowerCAmelCase : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , UpperCamelCase_ )
# New Code #
# We also run predictions on the test set at the very end
_lowerCAmelCase : Dict = []
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_lowerCAmelCase : Dict = model(**UpperCamelCase_ )
_lowerCAmelCase : List[Any] = outputs.logits
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(UpperCamelCase_ , dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
_lowerCAmelCase : Tuple = torch.cat(UpperCamelCase_ , dim=0 )
_lowerCAmelCase : Tuple = torch.stack(UpperCamelCase_ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
_lowerCAmelCase : Union[str, Any] = metric.compute(predictions=UpperCamelCase_ , references=UpperCamelCase_ )
accelerator.print("""Average test metrics from all folds:""" , UpperCamelCase_ )
def _UpperCAmelCase ():
'''simple docstring'''
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=UpperCamelCase_ , default=UpperCamelCase_ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
# New Code #
parser.add_argument("""--num_folds""" , type=UpperCamelCase_ , default=3 , help="""The number of splits to perform across the dataset""" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
_lowerCAmelCase : List[Any] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
main()
| 196 | 1 |
"""simple docstring"""
from __future__ import annotations
def __snake_case ( __A ,__A ,__A ,__A ) -> None:
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
lowercase , lowercase : Union[str, Any] = array[indexa], array[indexa]
def __snake_case ( __A ,__A ,__A ,__A ) -> None:
if length > 1:
lowercase : Any = int(length / 2 )
for i in range(__A ,low + middle ):
comp_and_swap(__A ,__A ,i + middle ,__A )
bitonic_merge(__A ,__A ,__A ,__A )
bitonic_merge(__A ,low + middle ,__A ,__A )
def __snake_case ( __A ,__A ,__A ,__A ) -> None:
if length > 1:
lowercase : Any = int(length / 2 )
bitonic_sort(__A ,__A ,__A ,1 )
bitonic_sort(__A ,low + middle ,__A ,0 )
bitonic_merge(__A ,__A ,__A ,__A )
if __name__ == "__main__":
lowerCAmelCase: Tuple =input("Enter numbers separated by a comma:\n").strip()
lowerCAmelCase: Optional[int] =[int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
| 607 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __snake_case ( __A ) -> Any:
lowercase : List[str] = os.path.join(args.tf_model_dir ,"""parameters.json""" )
lowercase : Any = json.loads(open(__A ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith(""".pt""" ):
lowercase : List[str] = args.output + """.pt"""
lowercase : Dict = OrderedDict()
with tf.device("""/CPU:0""" ):
lowercase : Any = tf.train.load_checkpoint(args.tf_model_dir )
lowercase : List[Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowercase : Optional[Any] = reader.get_tensor(__A ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
lowercase : str = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
lowercase : List[Any] = 8
lowercase : Optional[int] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowercase : int = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int = torch.tensor(__A )
elif key_name.startswith("""model/moe""" ):
lowercase : Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
lowercase : Any = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
lowercase : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Tuple = torch.tensor(__A )
elif key_name.endswith("""/softmlp/kernel""" ):
lowercase : str = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
lowercase : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] = torch.tensor(__A )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
lowercase : Union[str, Any] = key_name[-9:-7]
for i in range(16 ):
lowercase : int = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
lowercase : str = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase : List[Any] = torch.tensor(__A )
elif key_name.startswith("""model/mlp""" ):
lowercase : Any = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
lowercase : Dict = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
lowercase : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int = torch.tensor(__A )
elif key_name.endswith("""/p1/bias""" ):
lowercase : Tuple = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
lowercase : Tuple = vnp.copy() # same because it is one dimensional
lowercase : Tuple = torch.tensor(__A )
elif key_name.endswith("""/p2/kernel""" ):
lowercase : int = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
lowercase : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] = torch.tensor(__A )
elif key_name.endswith("""/p2/bias""" ):
lowercase : Any = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
lowercase : str = vnp.copy() # same because it is one dimensional
lowercase : str = torch.tensor(__A )
elif key_name.startswith("""model/ln""" ):
lowercase : Union[str, Any] = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase : Any = """model.blocks.%d.feed_forward.norm.bias""" % player
lowercase : List[str] = vnp.copy() # same because it is one dimensional
lowercase : Any = torch.tensor(__A )
elif key_name.endswith("""/g""" ):
lowercase : int = """model.blocks.%d.feed_forward.norm.weight""" % player
lowercase : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowercase : Optional[Any] = torch.tensor(__A )
elif key_name.startswith("""model/att""" ):
lowercase : Optional[int] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
lowercase : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase : Any = state[:, 0, :, :]
lowercase : List[Any] = state[:, 1, :, :]
lowercase : Optional[Any] = state[:, 2, :, :]
lowercase : Dict = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : str = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
lowercase : Optional[int] = torch.tensor(__A )
lowercase : int = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
lowercase : Optional[Any] = torch.tensor(__A )
lowercase : Tuple = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
lowercase : List[str] = torch.tensor(__A )
elif key_name.endswith("""/o/kernel""" ):
lowercase : Any = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
lowercase : List[Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase : Optional[int] = torch.tensor(__A )
elif key_name.startswith("""model/an""" ):
lowercase : Tuple = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowercase : List[str] = """model.blocks.%d.self_attn.norm.bias""" % player
lowercase : List[str] = vnp.copy() # same because it is one dimensional
lowercase : int = torch.tensor(__A )
elif key_name.endswith("""/g""" ):
lowercase : Any = """model.blocks.%d.self_attn.norm.weight""" % player
lowercase : Union[str, Any] = vnp.copy() # same because it is one dimensional
lowercase : int = torch.tensor(__A )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
lowercase : Any = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
lowercase : Optional[int] = """model.%s.weight""" % nlayer
lowercase : Any = vnp.copy() # same in embedded
lowercase : Dict = torch.tensor(__A )
if key_name.startswith("""model/wte""" ):
lowercase : Optional[Any] = """lm_head.weight"""
lowercase : int = vnp.copy() # same in embedded
lowercase : str = torch.tensor(__A )
elif key_name.startswith("""model/wob""" ):
lowercase : str = """final_logits_bias"""
lowercase : List[str] = vnp.copy() # same in embedded
lowercase : str = state.reshape((1, -1) )
lowercase : Tuple = torch.tensor(__A )
elif key_name == "model/dense/kernel":
lowercase : Dict = """model.last_project.weight"""
lowercase : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase : int = torch.tensor(__A )
elif key_name == "model/dense_1/bias":
lowercase : List[str] = """model.last_project.bias"""
lowercase : Dict = vnp.copy() # same because it is one dimensional
lowercase : List[str] = torch.tensor(__A )
torch.save(__A ,args.output )
if __name__ == "__main__":
lowerCAmelCase: Tuple =argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
lowerCAmelCase: Tuple =parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 607 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class __lowercase( SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = ShapEPipeline
UpperCamelCase_ = ['''prompt''']
UpperCamelCase_ = ['''prompt''']
UpperCamelCase_ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase_ = False
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> Optional[Any]:
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Dict:
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ) -> Optional[int]:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[int]:
return 8
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[str]:
_lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ) -> Any:
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE_ ( self : str ) -> List[str]:
torch.manual_seed(0 )
_lowerCAmelCase = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_lowerCAmelCase = PriorTransformer(**_lowerCAmelCase )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ) -> List[str]:
torch.manual_seed(0 )
_lowerCAmelCase = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
_lowerCAmelCase = ShapERenderer(**_lowerCAmelCase )
return model
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> Any:
_lowerCAmelCase = self.dummy_prior
_lowerCAmelCase = self.dummy_text_encoder
_lowerCAmelCase = self.dummy_tokenizer
_lowerCAmelCase = self.dummy_renderer
_lowerCAmelCase = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=_lowerCAmelCase , clip_sample=_lowerCAmelCase , clip_sample_range=1.0 , )
_lowerCAmelCase = {
'prior': prior,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any]=0 ) -> Optional[Any]:
if str(_lowerCAmelCase ).startswith('mps' ):
_lowerCAmelCase = torch.manual_seed(_lowerCAmelCase )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowerCAmelCase = {
'prompt': 'horse',
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : str ) -> Optional[Any]:
_lowerCAmelCase = 'cpu'
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
_lowerCAmelCase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
_lowerCAmelCase = output.images[0]
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_lowerCAmelCase = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def SCREAMING_SNAKE_CASE_ ( self : List[str] ) -> List[Any]:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ) -> List[Any]:
_lowerCAmelCase = torch_device == 'cpu'
_lowerCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_lowerCAmelCase , relax_max_difference=_lowerCAmelCase , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ) -> int:
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
_lowerCAmelCase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase = 1
_lowerCAmelCase = 2
_lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
_lowerCAmelCase = batch_size * [inputs[key]]
_lowerCAmelCase = pipe(**_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class __lowercase( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Any ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : int ) -> int:
_lowerCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_np_out.npy' )
_lowerCAmelCase = ShapEPipeline.from_pretrained('openai/shap-e' )
_lowerCAmelCase = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
_lowerCAmelCase = pipe(
'a shark' , generator=_lowerCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 705 |
import requests
from bsa import BeautifulSoup
def _a ( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
_lowerCAmelCase = BeautifulSoup(requests.get(__SCREAMING_SNAKE_CASE ).text , 'html.parser' )
_lowerCAmelCase = soup.findAll('h1' )
_lowerCAmelCase = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F"{key}\n{value}\n")
| 585 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase_ ( _lowerCamelCase: str ) -> Dict:
'''simple docstring'''
__lowerCamelCase : Optional[int] = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def lowercase_ ( _lowerCamelCase: Dict , _lowerCamelCase: int ) -> List[str]:
'''simple docstring'''
__lowerCamelCase : List[str] = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def lowercase_ ( _lowerCamelCase: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", "stage2.cls_token") )
return token
def lowercase_ ( ) -> Dict:
'''simple docstring'''
__lowerCamelCase : Optional[Any] = []
head.append(("layernorm.weight", "norm.weight") )
head.append(("layernorm.bias", "norm.bias") )
head.append(("classifier.weight", "head.weight") )
head.append(("classifier.bias", "head.bias") )
return head
def lowercase_ ( _lowerCamelCase: Any , _lowerCamelCase: Optional[int] , _lowerCamelCase: Dict , _lowerCamelCase: Optional[int] ) -> List[Any]:
'''simple docstring'''
__lowerCamelCase : Optional[int] = "imagenet-1k-id2label.json"
__lowerCamelCase : Any = 1000
__lowerCamelCase : Dict = "huggingface/label-files"
__lowerCamelCase : Dict = num_labels
__lowerCamelCase : Dict = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) ) , "r" ) )
__lowerCamelCase : Dict = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__lowerCamelCase : Optional[int] = idalabel
__lowerCamelCase : int = {v: k for k, v in idalabel.items()}
__lowerCamelCase : Union[str, Any] = CvtConfig(num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit("/" , 1 )[-1][4:6] == "13":
__lowerCamelCase : Any = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit("/" , 1 )[-1][4:6] == "21":
__lowerCamelCase : Tuple = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
__lowerCamelCase : List[str] = [2, 2, 20]
__lowerCamelCase : Dict = [3, 12, 16]
__lowerCamelCase : Any = [192, 768, 1024]
__lowerCamelCase : Tuple = CvtForImageClassification(_lowerCamelCase )
__lowerCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
__lowerCamelCase : int = image_size
__lowerCamelCase : Dict = torch.load(_lowerCamelCase , map_location=torch.device("cpu" ) )
__lowerCamelCase : Union[str, Any] = OrderedDict()
__lowerCamelCase : str = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__lowerCamelCase : int = list_of_state_dict + cls_token(_lowerCamelCase )
__lowerCamelCase : Tuple = list_of_state_dict + embeddings(_lowerCamelCase )
for cnt in range(config.depth[idx] ):
__lowerCamelCase : Any = list_of_state_dict + attention(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Optional[int] = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
__lowerCamelCase : Optional[int] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
image_processor.save_pretrained(_lowerCamelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=R'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__A = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path) | 646 | """simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class _snake_case ( unittest.TestCase ):
def __init__( self : str , UpperCAmelCase : int , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : int = 32 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[int, float] = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[float, List[float]]] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , UpperCAmelCase : Optional[Union[float, List[float]]] = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , UpperCAmelCase : bool = True , UpperCAmelCase : str=7 , UpperCAmelCase : Union[str, Any]=30 , UpperCAmelCase : int=400 , UpperCAmelCase : Any=3 , ):
__lowerCamelCase : str = parent
__lowerCamelCase : Tuple = do_resize
__lowerCamelCase : Tuple = size if size is not None else {"shortest_edge": 288}
__lowerCamelCase : List[Any] = size_divisor
__lowerCamelCase : List[str] = do_rescale
__lowerCamelCase : List[Any] = rescale_factor
__lowerCamelCase : Optional[int] = do_normalize
__lowerCamelCase : Tuple = do_center_crop
__lowerCamelCase : List[Any] = image_mean
__lowerCamelCase : Tuple = image_std
__lowerCamelCase : Any = do_pad
__lowerCamelCase : int = batch_size
__lowerCamelCase : Optional[Any] = num_channels
__lowerCamelCase : Optional[int] = min_resolution
__lowerCamelCase : Optional[int] = max_resolution
def lowerCamelCase__ ( self : Tuple ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[str, Any]=False ):
if not batched:
__lowerCamelCase : List[str] = self.size["shortest_edge"]
__lowerCamelCase : Optional[int] = image_inputs[0]
if isinstance(UpperCAmelCase , Image.Image ):
__lowerCamelCase , __lowerCamelCase : str = image.size
else:
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = image.shape[1], image.shape[2]
__lowerCamelCase : Optional[Any] = size / min(UpperCAmelCase , UpperCAmelCase )
if h < w:
__lowerCamelCase , __lowerCamelCase : int = size, scale * w
else:
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = scale * h, size
__lowerCamelCase : Optional[Any] = int((1333 / 800) * size )
if max(UpperCAmelCase , UpperCAmelCase ) > max_size:
__lowerCamelCase : Union[str, Any] = max_size / max(UpperCAmelCase , UpperCAmelCase )
__lowerCamelCase : Dict = newh * scale
__lowerCamelCase : str = neww * scale
__lowerCamelCase , __lowerCamelCase : Optional[int] = int(newh + 0.5 ), int(neww + 0.5 )
__lowerCamelCase , __lowerCamelCase : Dict = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
__lowerCamelCase : Optional[int] = []
for image in image_inputs:
__lowerCamelCase , __lowerCamelCase : int = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCamelCase : Any = max(UpperCAmelCase , key=lambda UpperCAmelCase : item[0] )[0]
__lowerCamelCase : Any = max(UpperCAmelCase , key=lambda UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Tuple = BridgeTowerImageProcessingTester(self )
@property
def lowerCamelCase__ ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(UpperCAmelCase , "image_std" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size" ) )
self.assertTrue(hasattr(UpperCAmelCase , "size_divisor" ) )
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
def lowerCamelCase__ ( self : Optional[Any] ):
# Initialize image processor
__lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCamelCase : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , Image.Image )
# Test not batched input
__lowerCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : int = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : List[Any] ):
# Initialize image processor
__lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , np.ndarray )
# Test not batched input
__lowerCamelCase : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : Dict = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : str = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : List[str] = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCamelCase__ ( self : Optional[int] ):
# Initialize image processor
__lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase , torch.Tensor )
# Test not batched input
__lowerCamelCase : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCamelCase : str = image_processing(UpperCAmelCase , return_tensors="pt" ).pixel_values
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , ) | 646 | 1 |
"""simple docstring"""
import random
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
snake_case_ : Union[str, Any] = num - 1
snake_case_ : List[str] = 0
while s % 2 == 0:
snake_case_ : str = s // 2
t += 1
for _ in range(5 ):
snake_case_ : List[Any] = random.randrange(2 , num - 1 )
snake_case_ : Dict = pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if v != 1:
snake_case_ : int = 0
while v != (num - 1):
if i == t - 1:
return False
else:
snake_case_ : str = i + 1
snake_case_ : int = (v**2) % num
return True
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int ):
"""simple docstring"""
if num < 2:
return False
snake_case_ : Dict = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : int = 1_0_2_4 ):
"""simple docstring"""
while True:
snake_case_ : Tuple = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(SCREAMING_SNAKE_CASE__ ):
return num
if __name__ == "__main__":
a_ = generate_large_prime()
print(('''Prime number:''', num))
print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 48 |
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
a_ = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
a_ = 10
a_ = 256
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE__ ) < MIN_NUM_TOKENS:
return None
snake_case_ : Union[str, Any] = MinHash(num_perm=SCREAMING_SNAKE_CASE__ )
for token in set(SCREAMING_SNAKE_CASE__ ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
return {t for t in NON_ALPHA.split(SCREAMING_SNAKE_CASE__ ) if len(t.strip() ) > 0}
class __lowercase :
"""simple docstring"""
def __init__(self , *,
lowercase__ = 0.85 , ):
snake_case_ : Tuple = duplication_jaccard_threshold
snake_case_ : Optional[Any] = NUM_PERM
snake_case_ : Tuple = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
snake_case_ : List[Any] = defaultdict(lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ ):
snake_case_ : int = self._index.query(lowercase__ )
if code_key in self._index.keys:
print(f'Duplicate key {code_key}' )
return
self._index.insert(lowercase__ , lowercase__ )
if len(lowercase__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(lowercase__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(lowercase__ )
def __UpperCamelCase (self ):
snake_case_ : str = []
for base, duplicates in self._duplicate_clusters.items():
snake_case_ : Optional[Any] = [base] + list(lowercase__ )
# reformat the cluster to be a list of dict
snake_case_ : Any = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(lowercase__ )
return duplicate_clusters
def __UpperCamelCase (self , lowercase__ ):
snake_case_ : int = self.get_duplicate_clusters()
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ , snake_case_ : str = element
snake_case_ : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] ):
"""simple docstring"""
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(SCREAMING_SNAKE_CASE__ , max_queue_size=1_0_0_0_0 ) , chunksize=1_0_0 , ):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float ):
"""simple docstring"""
snake_case_ : int = DuplicationIndex(duplication_jaccard_threshold=SCREAMING_SNAKE_CASE__ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(SCREAMING_SNAKE_CASE__ ) ) , max_queue_size=1_0_0 ) ):
di.add(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
"""simple docstring"""
snake_case_ : int = get_tokens(SCREAMING_SNAKE_CASE__ )
snake_case_ : Tuple = get_tokens(SCREAMING_SNAKE_CASE__ )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
a_ = None
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[str] ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
for elementa in cluster:
snake_case_ : Union[str, Any] = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
snake_case_ : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
snake_case_ : Union[str, Any] = 1
extremes.append(SCREAMING_SNAKE_CASE__ )
return extremes
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
"""simple docstring"""
global _shared_dataset
snake_case_ : str = dataset
snake_case_ : int = []
snake_case_ : Optional[int] = partial(_find_cluster_extremes_shared , jaccard_threshold=SCREAMING_SNAKE_CASE__ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) , total=len(SCREAMING_SNAKE_CASE__ ) , ):
extremes_list.append(SCREAMING_SNAKE_CASE__ )
return extremes_list
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : Type[Dataset] , SCREAMING_SNAKE_CASE__ : float = 0.85 ):
"""simple docstring"""
snake_case_ : List[str] = make_duplicate_clusters(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
snake_case_ : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
snake_case_ : str = {}
snake_case_ : Dict = find_extremes(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for extremes in extremes_clusters:
for element in extremes:
snake_case_ : int = element
snake_case_ : Optional[int] = duplicate_indices - set(extreme_dict.keys() )
snake_case_ : List[Any] = dataset.filter(lambda SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : idx not in remove_indices , with_indices=SCREAMING_SNAKE_CASE__ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
snake_case_ : List[Any] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
snake_case_ : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(f'Original dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Number of duplicate clusters: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Unique files in duplicate cluster: {len(SCREAMING_SNAKE_CASE__ )}' )
print(f'Filtered dataset size: {len(SCREAMING_SNAKE_CASE__ )}' )
return ds_filter, duplicate_clusters
| 48 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class _UpperCAmelCase:
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_12 , __a=16 , __a=2 , __a=0.02 , __a=3 , __a=4 , __a=None , __a=0 , ) -> Any:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = scope
_UpperCamelCase = projection_dim
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
_UpperCamelCase = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length])
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices)
_UpperCamelCase = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , )
_UpperCamelCase = DPRConfig(projection_dim=self.projection_dim , **config.to_dict())
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = TFDPRContextEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a , token_type_ids=__a)
_UpperCamelCase = model(__a , token_type_ids=__a)
_UpperCamelCase = model(__a)
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size))
def UpperCAmelCase ( self , __a , __a , __a , __a , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = TFDPRReader(config=__a)
_UpperCamelCase = model(__a , attention_mask=__a)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,))
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class _UpperCAmelCase( lowerCamelCase , lowerCamelCase , unittest.TestCase ):
lowercase__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowercase__ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRModelTester(self)
_UpperCamelCase = ConfigTester(self , config_class=__a , hidden_size=37)
def UpperCAmelCase ( self) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__a)
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__a)
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__a)
@slow
def UpperCAmelCase ( self) -> str:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRContextEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained(__a)
self.assertIsNotNone(__a)
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = TFDPRReader.from_pretrained(__a)
self.assertIsNotNone(__a)
@require_tf
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''')
_UpperCamelCase = tf.constant(
[[1_01, 75_92, 10_10, 20_03, 20_26, 38_99, 1_01_40, 10_29, 1_02]]) # [CLS] hello, is my dog cute? [SEP]
_UpperCamelCase = model(__a)[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_UpperCamelCase = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
])
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4))
| 19 |
"""simple docstring"""
import heapq
def lowerCamelCase__ ( __snake_case ) -> set[int]:
"""simple docstring"""
_UpperCamelCase = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(__snake_case, [-1 * len(__snake_case ), (key, value)] )
# chosen_vertices = set of chosen vertices
_UpperCamelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_UpperCamelCase = heapq.heappop(__snake_case )[1][0]
chosen_vertices.add(__snake_case )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_UpperCamelCase = elem[1][1].index(__snake_case )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(__snake_case )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_a = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"""Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}""")
| 19 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Any = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class A_ (a_ ):
"""simple docstring"""
a__ = '''swin2sr'''
a__ = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self :List[Any] , lowerCAmelCase__ :Dict=64 , lowerCAmelCase__ :List[Any]=1 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :List[str]=180 , lowerCAmelCase__ :Dict=[6, 6, 6, 6, 6, 6] , lowerCAmelCase__ :Optional[int]=[6, 6, 6, 6, 6, 6] , lowerCAmelCase__ :List[str]=8 , lowerCAmelCase__ :Dict=2.0 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=0.0 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Union[str, Any]="gelu" , lowerCAmelCase__ :Any=False , lowerCAmelCase__ :List[str]=0.0_2 , lowerCAmelCase__ :Union[str, Any]=1E-5 , lowerCAmelCase__ :Optional[int]=2 , lowerCAmelCase__ :Tuple=1.0 , lowerCAmelCase__ :List[Any]="1conv" , lowerCAmelCase__ :Optional[Any]="pixelshuffle" , **lowerCAmelCase__ :Any , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowerCAmelCase__ )
snake_case_ : Tuple = image_size
snake_case_ : List[Any] = patch_size
snake_case_ : int = num_channels
snake_case_ : Tuple = embed_dim
snake_case_ : Any = depths
snake_case_ : List[str] = len(lowerCAmelCase__ )
snake_case_ : Tuple = num_heads
snake_case_ : int = window_size
snake_case_ : List[str] = mlp_ratio
snake_case_ : Optional[Any] = qkv_bias
snake_case_ : Optional[int] = hidden_dropout_prob
snake_case_ : List[str] = attention_probs_dropout_prob
snake_case_ : Optional[Any] = drop_path_rate
snake_case_ : Union[str, Any] = hidden_act
snake_case_ : List[str] = use_absolute_embeddings
snake_case_ : List[str] = layer_norm_eps
snake_case_ : Optional[int] = initializer_range
snake_case_ : Optional[int] = upscale
snake_case_ : str = img_range
snake_case_ : Any = resi_connection
snake_case_ : Any = upsampler
| 707 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def __UpperCAmelCase ( )-> int:
"""simple docstring"""
snake_case_ : Any = {
"repo_name": ["test_repo1", "test_repo2", "test_repo3"],
"path": ["test_1.py", "test_2.py", "unit_test.py"],
"content": ["a " * 20, "a " * 30, "b " * 7],
}
snake_case_ : int = Dataset.from_dict(__magic_name__ )
return dataset
class A_ (a_ ):
"""simple docstring"""
def _A ( self :List[str] ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = get_dataset()
snake_case_ : Optional[int] = make_duplicate_clusters(lowerCAmelCase__ , 0.8_5 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _A ( self :Union[str, Any] ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[int] = get_dataset()
snake_case_, snake_case_ : List[Any] = deduplicate_dataset(lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , 2 )
print(lowerCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]["copies"] , 2 )
self.assertEqual(duplicate_clusters[0][0]["is_extreme"] , lowerCAmelCase__ )
| 656 | 0 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: List[Any] , a: Dict , a: int=13 , a: Any=7 , a: Tuple=True , a: Union[str, Any]=True , a: List[str]=True , a: str=True , a: Any=99 , a: int=32 , a: Optional[int]=5 , a: Union[str, Any]=4 , a: int=37 , a: Union[str, Any]="gelu" , a: Union[str, Any]=0.1 , a: Optional[int]=0.1 , a: Tuple=512 , a: Dict=16 , a: List[str]=2 , a: Tuple=0.0_2 , a: Any=4 , ):
__lowerCamelCase : str = parent
__lowerCamelCase : Tuple = batch_size
__lowerCamelCase : Optional[int] = seq_length
__lowerCamelCase : Optional[Any] = is_training
__lowerCamelCase : Tuple = use_attention_mask
__lowerCamelCase : int = use_token_type_ids
__lowerCamelCase : List[Any] = use_labels
__lowerCamelCase : List[Any] = vocab_size
__lowerCamelCase : int = hidden_size
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : int = num_attention_heads
__lowerCamelCase : Dict = intermediate_size
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : int = hidden_dropout_prob
__lowerCamelCase : Tuple = attention_probs_dropout_prob
__lowerCamelCase : Optional[int] = max_position_embeddings
__lowerCamelCase : Optional[int] = type_vocab_size
__lowerCamelCase : Tuple = type_sequence_label_size
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : List[Any] = num_choices
def _snake_case ( self: List[str] ):
__lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase : Dict = None
if self.use_attention_mask:
__lowerCamelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase : Optional[Any] = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=snake_case_ , )
return config, input_ids, attention_mask
def _snake_case ( self: Optional[Any] ):
__lowerCamelCase : List[str] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Union[str, Any] = config_and_inputs
__lowerCamelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class A_ ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = FlaxDistilBertModelTester(self )
@slow
def _snake_case ( self: Tuple ):
for model_class_name in self.all_model_classes:
__lowerCamelCase : List[Any] = model_class_name.from_pretrained('distilbert-base-uncased' )
__lowerCamelCase : Any = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case_ )
@require_flax
class A_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self: List[str] ):
__lowerCamelCase : Tuple = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
__lowerCamelCase : Union[str, Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__lowerCamelCase : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__lowerCamelCase : List[str] = model(snake_case_ , attention_mask=snake_case_ )[0]
__lowerCamelCase : Union[str, Any] = (1, 11, 768)
self.assertEqual(output.shape , snake_case_ )
__lowerCamelCase : int = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , snake_case_ , atol=1e-4 ) )
| 669 |
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
StableDiffusionControlNetImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class A ( a , a , a , unittest.TestCase ):
__UpperCAmelCase : List[Any] = StableDiffusionControlNetImgaImgPipeline
__UpperCAmelCase : List[str] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"""control_image"""} )
__UpperCAmelCase : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
_a = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_a = CLIPTextModel(snake_case_ )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Optional[int]:
if str(snake_case_ ).startswith("mps" ):
_a = torch.manual_seed(snake_case_ )
else:
_a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_a = 2
_a = randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=snake_case_ , device=torch.device(snake_case_ ) , )
_a = floats_tensor(control_image.shape , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
_a = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> str:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __lowerCAmelCase ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
class A ( a , a , unittest.TestCase ):
__UpperCAmelCase : Dict = StableDiffusionControlNetImgaImgPipeline
__UpperCAmelCase : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width"""}
__UpperCAmelCase : Dict = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
__UpperCAmelCase : List[Any] = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
def __lowerCAmelCase ( self ) -> str:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
torch.manual_seed(0 )
def init_weights(snake_case_ ):
if isinstance(snake_case_ , torch.nn.Convad ):
torch.nn.init.normal(m.weight )
m.bias.data.fill_(1.0 )
_a = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(snake_case_ )
torch.manual_seed(0 )
_a = ControlNetModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , in_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , cross_attention_dim=3_2 , conditioning_embedding_out_channels=(1_6, 3_2) , )
controlneta.controlnet_down_blocks.apply(snake_case_ )
torch.manual_seed(0 )
_a = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=snake_case_ , set_alpha_to_one=snake_case_ , )
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
_a = CLIPTextModel(snake_case_ )
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = MultiControlNetModel([controlneta, controlneta] )
_a = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __lowerCAmelCase ( self , snake_case_ , snake_case_=0 ) -> Optional[int]:
if str(snake_case_ ).startswith("mps" ):
_a = torch.manual_seed(snake_case_ )
else:
_a = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
_a = 2
_a = [
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=snake_case_ , device=torch.device(snake_case_ ) , ),
randn_tensor(
(1, 3, 3_2 * controlnet_embedder_scale_factor, 3_2 * controlnet_embedder_scale_factor) , generator=snake_case_ , device=torch.device(snake_case_ ) , ),
]
_a = floats_tensor(control_image[0].shape , rng=random.Random(snake_case_ ) ).to(snake_case_ )
_a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
_a = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
"image": image,
"control_image": control_image,
}
return inputs
def __lowerCAmelCase ( self ) -> Tuple:
_a = self.get_dummy_components()
_a = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
_a = 10.0
_a = 4
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ )[0]
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ , control_guidance_start=0.1 , control_guidance_end=0.2 )[0]
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ , control_guidance_start=[0.1, 0.3] , control_guidance_end=[0.2, 0.7] )[0]
_a = self.get_dummy_inputs(snake_case_ )
_a = steps
_a = scale
_a = pipe(**snake_case_ , control_guidance_start=0.4 , control_guidance_end=[0.5, 0.8] )[0]
# make sure that all outputs are different
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
assert np.sum(np.abs(output_a - output_a ) ) > 1E-3
def __lowerCAmelCase ( self ) -> Optional[int]:
return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __lowerCAmelCase ( self ) -> List[str]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> List[Any]:
self._test_inference_batch_single_identical(expected_max_diff=2E-3 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_dummy_components()
_a = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
with tempfile.TemporaryDirectory() as tmpdir:
try:
# save_pretrained is not implemented for Multi-ControlNet
pipe.save_pretrained(snake_case_ )
except NotImplementedError:
pass
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny" )
_a = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , safety_checker=snake_case_ , controlnet=snake_case_ )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=snake_case_ )
_a = torch.Generator(device="cpu" ).manual_seed(0 )
_a = "evil space-punk bird"
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((5_1_2, 5_1_2) )
_a = load_image(
"https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((5_1_2, 5_1_2) )
_a = pipe(
snake_case_ , snake_case_ , control_image=snake_case_ , generator=snake_case_ , output_type="np" , num_inference_steps=5_0 , strength=0.6 , )
_a = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" )
assert np.abs(expected_image - image ).max() < 9E-2
| 131 | 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__magic_name__ = ['image_processor', 'tokenizer']
__magic_name__ = 'BlipImageProcessor'
__magic_name__ = 'AutoTokenizer'
def __init__( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
super().__init__(_a , _a )
# add QFormer tokenizer
_lowerCAmelCase : Dict = qformer_tokenizer
def __call__( self , snake_case__ = None , snake_case__ = None , snake_case__ = True , snake_case__ = False , snake_case__ = None , snake_case__ = None , snake_case__ = 0 , snake_case__ = None , snake_case__ = None , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = False , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
_lowerCAmelCase : str = BatchFeature()
if text is not None:
_lowerCAmelCase : Union[str, Any] = self.tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
encoding.update(_a )
_lowerCAmelCase : List[Any] = self.qformer_tokenizer(
text=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_token_type_ids=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
_lowerCAmelCase : Union[str, Any] = qformer_text_encoding.pop('input_ids' )
_lowerCAmelCase : Tuple = qformer_text_encoding.pop('attention_mask' )
if images is not None:
_lowerCAmelCase : Optional[int] = self.image_processor(_a , return_tensors=_a )
encoding.update(_a )
return encoding
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a , **_a )
def a ( self , *snake_case__ , **snake_case__ ):
'''simple docstring'''
return self.tokenizer.decode(*_a , **_a )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : str = self.tokenizer.model_input_names
_lowerCAmelCase : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def a ( self , snake_case__ , **snake_case__ ):
'''simple docstring'''
if os.path.isfile(_a ):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(_a , exist_ok=_a )
_lowerCAmelCase : Optional[int] = os.path.join(_a , 'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(_a )
return super().save_pretrained(_a , **_a )
@classmethod
def a ( cls , snake_case__ , **snake_case__ ):
'''simple docstring'''
_lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained(_a , subfolder='qformer_tokenizer' )
_lowerCAmelCase : Any = cls._get_arguments_from_pretrained(_a , **_a )
args.append(_a )
return cls(*_a )
| 721 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : str = logging.get_logger(__name__)
lowerCAmelCase : int = {
"""facebook/data2vec-text-base""": """https://huggingface.co/data2vec/resolve/main/config.json""",
}
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = "data2vec-text"
def __init__( self , snake_case__=3_0522 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=2 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=1 , snake_case__=0 , snake_case__=2 , snake_case__="absolute" , snake_case__=True , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : int = num_attention_heads
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = intermediate_size
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : str = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : List[Any] = position_embedding_type
_lowerCAmelCase : str = use_cache
_lowerCAmelCase : Union[str, Any] = classifier_dropout
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
@property
def a ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
_lowerCAmelCase : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_lowerCAmelCase : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 630 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ :Any = logging.get_logger(__name__)
a_ :List[Any] = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowercase ( A_ ):
lowerCamelCase : Dict = "deformable_detr"
lowerCamelCase : Dict = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Any , _lowercase : List[str]=True , _lowercase : int=None , _lowercase : Any=3 , _lowercase : Dict=3_00 , _lowercase : Optional[Any]=10_24 , _lowercase : Optional[Any]=6 , _lowercase : Union[str, Any]=10_24 , _lowercase : Optional[int]=8 , _lowercase : Tuple=6 , _lowercase : Dict=10_24 , _lowercase : Optional[Any]=8 , _lowercase : Any=0.0 , _lowercase : Tuple=True , _lowercase : Any="relu" , _lowercase : Tuple=2_56 , _lowercase : Union[str, Any]=0.1 , _lowercase : int=0.0 , _lowercase : Any=0.0 , _lowercase : Optional[int]=0.02 , _lowercase : Optional[Any]=1.0 , _lowercase : Any=True , _lowercase : List[str]=False , _lowercase : List[str]="sine" , _lowercase : Optional[Any]="resnet50" , _lowercase : int=True , _lowercase : Union[str, Any]=False , _lowercase : int=4 , _lowercase : Any=4 , _lowercase : Any=4 , _lowercase : int=False , _lowercase : str=3_00 , _lowercase : Dict=False , _lowercase : Optional[Any]=1 , _lowercase : Tuple=5 , _lowercase : List[Any]=2 , _lowercase : Optional[Any]=1 , _lowercase : Dict=1 , _lowercase : int=5 , _lowercase : Optional[Any]=2 , _lowercase : List[str]=0.1 , _lowercase : Tuple=0.25 , _lowercase : List[str]=False , **_lowercase : Dict , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE__ : Tuple = CONFIG_MAPPING["resnet"](out_features=['''stage4'''] )
elif isinstance(snake_case__ , snake_case__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE__ : Optional[int] = config_class.from_dict(snake_case__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_timm_backbone
SCREAMING_SNAKE_CASE__ : Optional[Any] = backbone_config
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE__ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Dict = d_model
SCREAMING_SNAKE_CASE__ : int = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : str = decoder_layers
SCREAMING_SNAKE_CASE__ : Dict = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = dropout
SCREAMING_SNAKE_CASE__ : Any = attention_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = activation_dropout
SCREAMING_SNAKE_CASE__ : Any = activation_function
SCREAMING_SNAKE_CASE__ : List[Any] = init_std
SCREAMING_SNAKE_CASE__ : Dict = init_xavier_std
SCREAMING_SNAKE_CASE__ : int = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Optional[int] = auxiliary_loss
SCREAMING_SNAKE_CASE__ : Optional[Any] = position_embedding_type
SCREAMING_SNAKE_CASE__ : int = backbone
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_pretrained_backbone
SCREAMING_SNAKE_CASE__ : List[str] = dilation
# deformable attributes
SCREAMING_SNAKE_CASE__ : Any = num_feature_levels
SCREAMING_SNAKE_CASE__ : int = encoder_n_points
SCREAMING_SNAKE_CASE__ : Any = decoder_n_points
SCREAMING_SNAKE_CASE__ : str = two_stage
SCREAMING_SNAKE_CASE__ : Optional[int] = two_stage_num_proposals
SCREAMING_SNAKE_CASE__ : Optional[Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : str = class_cost
SCREAMING_SNAKE_CASE__ : Any = bbox_cost
SCREAMING_SNAKE_CASE__ : List[str] = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : str = mask_loss_coefficient
SCREAMING_SNAKE_CASE__ : Optional[Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : int = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : Any = eos_coefficient
SCREAMING_SNAKE_CASE__ : Any = focal_alpha
SCREAMING_SNAKE_CASE__ : str = disable_custom_kernels
super().__init__(is_encoder_decoder=snake_case__ , **snake_case__ )
@property
def lowercase__ ( self : Any ):
return self.encoder_attention_heads
@property
def lowercase__ ( self : Union[str, Any] ):
return self.d_model
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Any = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.__class__.model_type
return output
| 35 |
def UpperCamelCase ( __lowerCamelCase : int = 1 , __lowerCamelCase : int = 1000 ):
snake_case : int = 1
snake_case : int = 0
for divide_by_number in range(__lowerCamelCase , digit + 1 ):
snake_case : list[int] = []
snake_case : Optional[int] = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__lowerCamelCase ):
snake_case : List[Any] = len(__lowerCamelCase )
snake_case : List[str] = divide_by_number
else:
has_been_divided.append(__lowerCamelCase )
snake_case : Any = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 204 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class __lowerCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
a_: str = """vivit"""
def __init__( self : Optional[Any] , lowerCamelCase_ : int=224 , lowerCamelCase_ : Dict=32 , lowerCamelCase_ : List[str]=[2, 16, 16] , lowerCamelCase_ : Optional[int]=3 , lowerCamelCase_ : Union[str, Any]=768 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : List[str]=12 , lowerCamelCase_ : int=3072 , lowerCamelCase_ : Optional[Any]="gelu_fast" , lowerCamelCase_ : List[str]=0.0 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : str=0.02 , lowerCamelCase_ : List[Any]=1e-06 , lowerCamelCase_ : Tuple=True , **lowerCamelCase_ : Tuple , ):
_lowerCAmelCase =hidden_size
_lowerCAmelCase =num_hidden_layers
_lowerCAmelCase =num_attention_heads
_lowerCAmelCase =intermediate_size
_lowerCAmelCase =hidden_act
_lowerCAmelCase =hidden_dropout_prob
_lowerCAmelCase =attention_probs_dropout_prob
_lowerCAmelCase =initializer_range
_lowerCAmelCase =layer_norm_eps
_lowerCAmelCase =image_size
_lowerCAmelCase =num_frames
_lowerCAmelCase =tubelet_size
_lowerCAmelCase =num_channels
_lowerCAmelCase =qkv_bias
super().__init__(**lowerCamelCase_ )
| 149 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 149 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 237 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE_ = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE_ = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 237 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
SCREAMING_SNAKE_CASE =tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] ,dtype=tf.intaa ,) # J'aime le camembert !"
SCREAMING_SNAKE_CASE =model(snake_case )["last_hidden_state"]
SCREAMING_SNAKE_CASE =tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape ,snake_case )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE =tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 713 |
from ...processing_utils import ProcessorMixin
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = ['image_processor', 'feature_extractor']
__UpperCAmelCase = 'TvltImageProcessor'
__UpperCAmelCase = 'TvltFeatureExtractor'
def __init__( self : Optional[int] ,snake_case : List[str] ,snake_case : Dict ):
super().__init__(image_processor=snake_case ,feature_extractor=snake_case )
SCREAMING_SNAKE_CASE =image_processor
SCREAMING_SNAKE_CASE =feature_extractor
def __call__( self : Dict ,snake_case : Union[str, Any]=None ,snake_case : Optional[int]=None ,snake_case : List[Any]=None ,snake_case : int=None ,snake_case : List[Any]=False ,snake_case : Optional[int]=False ,*snake_case : int ,**snake_case : str ,):
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.' )
SCREAMING_SNAKE_CASE =None
if images is not None:
SCREAMING_SNAKE_CASE =self.image_processor(snake_case ,mask_pixel=snake_case ,*snake_case ,**snake_case )
if images_mixed is not None:
SCREAMING_SNAKE_CASE =self.image_processor(snake_case ,is_mixed=snake_case ,*snake_case ,**snake_case )
if audio is not None:
SCREAMING_SNAKE_CASE =self.feature_extractor(
snake_case ,*snake_case ,sampling_rate=snake_case ,mask_audio=snake_case ,**snake_case )
SCREAMING_SNAKE_CASE ={}
if audio is not None:
output_dict.update(snake_case )
if images is not None:
output_dict.update(snake_case )
if images_mixed_dict is not None:
output_dict.update(snake_case )
return output_dict
@property
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.image_processor.model_input_names
SCREAMING_SNAKE_CASE =self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 252 | 0 |
"""simple docstring"""
import math
import os
import sys
def __UpperCamelCase ( snake_case__ ):
A_ : Optional[Any] = """"""
try:
with open(snake_case__ , """rb""" ) as binary_file:
A_ : Union[str, Any] = binary_file.read()
for dat in data:
A_ : Dict = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lexicon.pop(snake_case__ )
A_ : List[str] = last_match_id
if math.loga(snake_case__ ).is_integer():
for curr_key in lexicon:
A_ : Dict = """0""" + lexicon[curr_key]
A_ : int = bin(snake_case__ )[2:]
def __UpperCamelCase ( snake_case__ ):
A_ : Dict = {"""0""": """0""", """1""": """1"""}
A_ , A_ : Optional[int] = """""", """"""
A_ : Tuple = len(snake_case__ )
for i in range(len(snake_case__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A_ : List[str] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
index += 1
A_ : int = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
A_ : Any = lexicon[curr_string]
result += last_match_id
return result
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : Optional[int] = os.path.getsize(snake_case__ )
A_ : Dict = bin(snake_case__ )[2:]
A_ : Optional[Any] = len(snake_case__ )
return "0" * (length_length - 1) + file_length_binary + compressed
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : Tuple = 8
try:
with open(snake_case__ , """wb""" ) as opened_file:
A_ : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case__ ) , snake_case__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case__ , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : List[str] = read_file_binary(snake_case__ )
A_ : str = compress_data(snake_case__ )
A_ : int = add_file_length(snake_case__ , snake_case__ )
write_file_binary(snake_case__ , snake_case__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 180 |
"""simple docstring"""
_lowerCAmelCase = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 180 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
snake_case_ = UnCLIPImageVariationPipeline
snake_case_ = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
snake_case_ = IMAGE_VARIATION_BATCH_PARAMS
snake_case_ = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
snake_case_ = False
@property
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
return 32
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
return 32
@property
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
return 100
@property
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _UpperCamelCase ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
A__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(snake_case )
@property
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
A__ : List[Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(snake_case )
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A__ : List[str] = {
"""clip_embeddings_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""cross_attention_dim""": self.cross_attention_dim,
}
A__ : Dict = UnCLIPTextProjModel(**snake_case )
return model
@property
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
A__ : int = {
"""sample_size""": 32,
# RGB in channels
"""in_channels""": 3,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 6,
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": """identity""",
}
A__ : Optional[int] = UNetaDConditionModel(**snake_case )
return model
@property
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A__ : List[str] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(1 )
A__ : Optional[int] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[Any] = self.dummy_decoder
A__ : str = self.dummy_text_proj
A__ : List[str] = self.dummy_text_encoder
A__ : Optional[int] = self.dummy_tokenizer
A__ : Tuple = self.dummy_super_res_first
A__ : Dict = self.dummy_super_res_last
A__ : Optional[int] = UnCLIPScheduler(
variance_type="""learned_range""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
A__ : List[str] = UnCLIPScheduler(
variance_type="""fixed_small_log""" , prediction_type="""epsilon""" , num_train_timesteps=1000 , )
A__ : Dict = CLIPImageProcessor(crop_size=32 , size=32 )
A__ : str = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def _UpperCamelCase ( self : Optional[Any] , snake_case : Optional[Any] , snake_case : List[str]=0 , snake_case : int=True ):
'''simple docstring'''
A__ : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case )
if str(snake_case ).startswith("""mps""" ):
A__ : List[str] = torch.manual_seed(snake_case )
else:
A__ : Tuple = torch.Generator(device=snake_case ).manual_seed(snake_case )
if pil_image:
A__ : Dict = input_image * 0.5 + 0.5
A__ : int = input_image.clamp(0 , 1 )
A__ : Tuple = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A__ : str = DiffusionPipeline.numpy_to_pil(snake_case )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : str = """cpu"""
A__ : Union[str, Any] = self.get_dummy_components()
A__ : Dict = self.pipeline_class(**snake_case )
A__ : Optional[Any] = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
A__ : Any = self.get_dummy_inputs(snake_case , pil_image=snake_case )
A__ : List[str] = pipe(**snake_case )
A__ : Union[str, Any] = output.images
A__ : Optional[int] = self.get_dummy_inputs(snake_case , pil_image=snake_case )
A__ : int = pipe(
**snake_case , return_dict=snake_case , )[0]
A__ : Optional[Any] = image[0, -3:, -3:, -1]
A__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : Optional[int] = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Union[str, Any] = """cpu"""
A__ : Dict = self.get_dummy_components()
A__ : Optional[int] = self.pipeline_class(**snake_case )
A__ : Tuple = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
A__ : Optional[int] = self.get_dummy_inputs(snake_case , pil_image=snake_case )
A__ : Optional[int] = pipe(**snake_case )
A__ : Union[str, Any] = output.images
A__ : Tuple = self.get_dummy_inputs(snake_case , pil_image=snake_case )
A__ : Tuple = pipe(
**snake_case , return_dict=snake_case , )[0]
A__ : str = image[0, -3:, -3:, -1]
A__ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ : Tuple = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Dict = """cpu"""
A__ : Tuple = self.get_dummy_components()
A__ : int = self.pipeline_class(**snake_case )
A__ : Optional[int] = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
A__ : Optional[Any] = self.get_dummy_inputs(snake_case , pil_image=snake_case )
A__ : Optional[int] = [
pipeline_inputs["""image"""],
pipeline_inputs["""image"""],
]
A__ : Optional[Any] = pipe(**snake_case )
A__ : Optional[int] = output.images
A__ : List[Any] = self.get_dummy_inputs(snake_case , pil_image=snake_case )
A__ : List[str] = [
tuple_pipeline_inputs["""image"""],
tuple_pipeline_inputs["""image"""],
]
A__ : Optional[Any] = pipe(
**snake_case , return_dict=snake_case , )[0]
A__ : List[Any] = image[0, -3:, -3:, -1]
A__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
A__ : Optional[int] = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : List[str] = torch.device("""cpu""" )
class __SCREAMING_SNAKE_CASE :
snake_case_ = 1
A__ : Any = self.get_dummy_components()
A__ : List[str] = self.pipeline_class(**snake_case )
A__ : List[Any] = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
A__ : Union[str, Any] = torch.Generator(device=snake_case ).manual_seed(0 )
A__ : str = pipe.decoder.dtype
A__ : List[Any] = 1
A__ : List[str] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
A__ : List[str] = pipe.prepare_latents(
snake_case , dtype=snake_case , device=snake_case , generator=snake_case , latents=snake_case , scheduler=DummyScheduler() )
A__ : Dict = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
A__ : str = pipe.prepare_latents(
snake_case , dtype=snake_case , device=snake_case , generator=snake_case , latents=snake_case , scheduler=DummyScheduler() )
A__ : List[Any] = self.get_dummy_inputs(snake_case , pil_image=snake_case )
A__ : Dict = pipe(
**snake_case , decoder_latents=snake_case , super_res_latents=snake_case ).images
A__ : Any = self.get_dummy_inputs(snake_case , pil_image=snake_case )
# Don't pass image, instead pass embedding
A__ : Union[str, Any] = pipeline_inputs.pop("""image""" )
A__ : List[Any] = pipe.image_encoder(snake_case ).image_embeds
A__ : Optional[Any] = pipe(
**snake_case , decoder_latents=snake_case , super_res_latents=snake_case , image_embeddings=snake_case , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Optional[Any] = torch_device == """cpu"""
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
A__ : Union[str, Any] = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=snake_case , expected_max_diff=snake_case )
@skip_mps
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Tuple = torch_device == """cpu"""
A__ : Any = True
A__ : str = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
self._test_inference_batch_single_identical(
test_max_difference=snake_case , relax_max_difference=snake_case , additional_params_copy_to_batched_inputs=snake_case , )
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : int = [
"""decoder_num_inference_steps""",
"""super_res_num_inference_steps""",
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
A__ : int = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=snake_case , additional_params_copy_to_batched_inputs=snake_case , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=snake_case )
@skip_mps
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png""" )
A__ : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/unclip/karlo_v1_alpha_cat_variation_fp16.npy""" )
A__ : List[Any] = UnCLIPImageVariationPipeline.from_pretrained(
"""kakaobrain/karlo-v1-alpha-image-variations""" , torch_dtype=torch.floataa )
A__ : Optional[int] = pipeline.to(snake_case )
pipeline.set_progress_bar_config(disable=snake_case )
A__ : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[Any] = pipeline(
snake_case , generator=snake_case , output_type="""np""" , )
A__ : Optional[Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(snake_case , snake_case , 15 )
| 498 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
A_ = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[str] ) ->str:
for attribute in key.split(""".""" ):
A__ : Any = getattr(UpperCAmelCase__, UpperCAmelCase__ )
if weight_type is not None:
A__ : Union[str, Any] = getattr(UpperCAmelCase__, UpperCAmelCase__ ).shape
else:
A__ : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
A__ : List[Any] = value
elif weight_type == "weight_g":
A__ : Any = value
elif weight_type == "weight_v":
A__ : Optional[int] = value
elif weight_type == "bias":
A__ : Any = value
else:
A__ : Any = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Any ) ->Union[str, Any]:
A__ : Dict = []
A__ : int = fairseq_model.state_dict()
A__ : Dict = hf_model.feature_extractor
A__ : Any = hf_model.adapter
for name, value in fairseq_dict.items():
A__ : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, hf_model.config.feat_extract_norm == """group""", )
A__ : Union[str, Any] = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
A__ : int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
A__ : List[Any] = True
if "*" in mapped_key:
A__ : Union[str, Any] = name.split(UpperCAmelCase__ )[0].split(""".""" )[-2]
A__ : Union[str, Any] = mapped_key.replace("""*""", UpperCAmelCase__ )
if "weight_g" in name:
A__ : List[str] = """weight_g"""
elif "weight_v" in name:
A__ : Optional[Any] = """weight_v"""
elif "bias" in name:
A__ : Optional[int] = """bias"""
elif "weight" in name:
A__ : int = """weight"""
else:
A__ : Optional[Any] = None
set_recursively(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase__ )
logger.warning(f'Unused weights: {unused_weights}' )
def _lowerCAmelCase ( UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Any ) ->List[Any]:
A__ : Dict = full_name.split("""conv_layers.""" )[-1]
A__ : Optional[int] = name.split(""".""" )
A__ : int = int(items[0] )
A__ : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
A__ : Optional[Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
A__ : Optional[int] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
A__ : Optional[int] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
A__ : Any = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCAmelCase__ )
def _lowerCAmelCase ( UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[str], UpperCAmelCase__ : Dict ) ->str:
A__ : Tuple = full_name.split("""adaptor.""" )[-1]
A__ : Optional[int] = name.split(""".""" )
if items[1].isdigit():
A__ : Optional[Any] = int(items[1] )
else:
A__ : Any = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
A__ : Union[str, Any] = value
logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
A__ : Tuple = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
A__ : str = value
logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
A__ : Union[str, Any] = value
logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
A__ : Dict = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
A__ : Optional[int] = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
else:
unused_weights.append(UpperCAmelCase__ )
def _lowerCAmelCase ( UpperCAmelCase__ : Union[str, Any] ) ->List[Any]:
A__ , A__ : Union[str, Any] = emb.weight.shape
A__ : List[str] = nn.Linear(UpperCAmelCase__, UpperCAmelCase__, bias=UpperCAmelCase__ )
A__ : List[Any] = emb.weight.data
return lin_layer
@torch.no_grad()
def _lowerCAmelCase ( UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Any, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Tuple, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : int, UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Dict, ) ->str:
A__ : Tuple = WavaVecaConfig.from_pretrained(
UpperCAmelCase__, add_adapter=UpperCAmelCase__, adapter_stride=UpperCAmelCase__, adapter_kernel_size=UpperCAmelCase__, use_auth_token=UpperCAmelCase__, output_hidden_size=UpperCAmelCase__, )
A__ : List[Any] = MBartConfig.from_pretrained(UpperCAmelCase__ )
# load model
A__ , A__ , A__ : str = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
}, )
A__ : List[Any] = model[0].eval()
# load feature extractor
A__ : Any = WavaVecaFeatureExtractor.from_pretrained(UpperCAmelCase__, use_auth_token=UpperCAmelCase__ )
# set weights for wav2vec2 encoder
A__ : Dict = WavaVecaModel(UpperCAmelCase__ )
recursively_load_weights_wavaveca(model.encoder, UpperCAmelCase__ )
# load decoder weights
A__ : Any = MBartForCausalLM(UpperCAmelCase__ )
A__ , A__ : Union[str, Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=UpperCAmelCase__ )
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
A__ : Dict = SpeechEncoderDecoderModel(encoder=UpperCAmelCase__, decoder=UpperCAmelCase__ )
A__ : Optional[Any] = False
A__ : Optional[Any] = MBartaaTokenizer(UpperCAmelCase__ )
tokenizer.save_pretrained(UpperCAmelCase__ )
A__ : Dict = hf_wavavec.config.to_dict()
A__ : List[Any] = tokenizer.pad_token_id
A__ : Optional[Any] = tokenizer.bos_token_id
A__ : List[Any] = tokenizer.eos_token_id
A__ : Tuple = """mbart50"""
A__ : List[str] = """wav2vec2"""
A__ : Optional[int] = tokenizer.eos_token_id
A__ : int = 2_5_0_0_0_4
A__ : Dict = tokenizer.eos_token_id
A__ : str = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase__ )
hf_wavavec.save_pretrained(UpperCAmelCase__ )
feature_extractor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whethere to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=25_0004, type=int, help='''`decoder_start_token_id` of model config''')
A_ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 498 | 1 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : str=False ):
'''simple docstring'''
try:
__magic_name__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__magic_name__ = default
else:
# KEY is set, convert it to True or False.
try:
__magic_name__ = strtobool(lowerCamelCase_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'If set, {key} must be yes or no.' )
return _value
__magic_name__ : Optional[int] =parse_flag_from_env('RUN_SLOW', default=False)
__magic_name__ : Optional[Any] =parse_flag_from_env('RUN_REMOTE', default=False)
__magic_name__ : Union[str, Any] =parse_flag_from_env('RUN_LOCAL', default=True)
__magic_name__ : Optional[Any] =parse_flag_from_env('RUN_PACKAGED', default=True)
# Compression
__magic_name__ : List[str] =pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4')
__magic_name__ : Optional[Any] =pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr')
__magic_name__ : str =pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard')
# Audio
__magic_name__ : Dict =pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'),
reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ',
)
# Beam
__magic_name__ : Dict =pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'),
reason='test requires apache-beam and a compatible dill version',
)
# Dill-cloudpickle compatibility
__magic_name__ : List[Any] =pytest.mark.skipif(
config.DILL_VERSION <= version.parse('0.3.2'),
reason='test requires dill>0.3.2 for cloudpickle compatibility',
)
# Windows
__magic_name__ : List[str] =pytest.mark.skipif(
sys.platform == 'win32',
reason='test should not be run on Windows',
)
def __snake_case ( lowerCamelCase_ : Tuple ):
'''simple docstring'''
try:
import faiss # noqa
except ImportError:
__magic_name__ = unittest.skip("test requires faiss" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : Dict ):
'''simple docstring'''
try:
import regex # noqa
except ImportError:
__magic_name__ = unittest.skip("test requires regex" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : Dict ):
'''simple docstring'''
try:
import elasticsearch # noqa
except ImportError:
__magic_name__ = unittest.skip("test requires elasticsearch" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : str ):
'''simple docstring'''
try:
import sqlalchemy # noqa
except ImportError:
__magic_name__ = unittest.skip("test requires sqlalchemy" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : Tuple ):
'''simple docstring'''
if not config.TORCH_AVAILABLE:
__magic_name__ = unittest.skip("test requires PyTorch" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
if not config.TF_AVAILABLE:
__magic_name__ = unittest.skip("test requires TensorFlow" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : str ):
'''simple docstring'''
if not config.JAX_AVAILABLE:
__magic_name__ = unittest.skip("test requires JAX" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : str ):
'''simple docstring'''
if not config.PIL_AVAILABLE:
__magic_name__ = unittest.skip("test requires Pillow" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers" )(lowerCamelCase_ )
else:
return test_case
def __snake_case ( lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken" )(lowerCamelCase_ )
else:
return test_case
def __snake_case ( lowerCamelCase_ : List[Any] ):
'''simple docstring'''
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy" )(lowerCamelCase_ )
else:
return test_case
def __snake_case ( lowerCamelCase_ : Tuple ):
'''simple docstring'''
def _require_spacy_model(lowerCamelCase_ : List[str] ):
try:
import spacy # noqa F401
spacy.load(lowerCamelCase_ )
except ImportError:
return unittest.skip("test requires spacy" )(lowerCamelCase_ )
except OSError:
return unittest.skip("test requires spacy model '{}'".format(lowerCamelCase_ ) )(lowerCamelCase_ )
else:
return test_case
return _require_spacy_model
def __snake_case ( lowerCamelCase_ : Tuple ):
'''simple docstring'''
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark" )(lowerCamelCase_ )
else:
return test_case
def __snake_case ( lowerCamelCase_ : List[str] ):
'''simple docstring'''
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark" )(lowerCamelCase_ )
else:
return test_case
def __snake_case ( lowerCamelCase_ : Any ):
'''simple docstring'''
if not _run_slow_tests or _run_slow_tests == 0:
__magic_name__ = unittest.skip("test is slow" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : str ):
'''simple docstring'''
if not _run_local_tests or _run_local_tests == 0:
__magic_name__ = unittest.skip("test is local" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : Tuple ):
'''simple docstring'''
if not _run_packaged_tests or _run_packaged_tests == 0:
__magic_name__ = unittest.skip("test is packaged" )(lowerCamelCase_ )
return test_case
def __snake_case ( lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
if not _run_remote_tests or _run_remote_tests == 0:
__magic_name__ = unittest.skip("test requires remote" )(lowerCamelCase_ )
return test_case
def __snake_case ( *lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
def decorate(cls : Optional[int] ):
for name, fn in cls.__dict__.items():
if callable(lowerCamelCase_ ) and name.startswith("test" ):
for decorator in decorators:
__magic_name__ = decorator(lowerCamelCase_ )
setattr(cls , lowerCamelCase_ , lowerCamelCase_ )
return cls
return decorate
class UpperCamelCase_ ( A ):
"""simple docstring"""
pass
class UpperCamelCase_ ( A ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Optional[Any] = 1
UpperCAmelCase__ : List[Any] = 2
@contextmanager
def __snake_case ( lowerCamelCase_ : str=OfflineSimulationMode.CONNECTION_FAILS , lowerCamelCase_ : Tuple=1e-16 ):
'''simple docstring'''
__magic_name__ = requests.Session().request
def timeout_request(lowerCamelCase_ : int , lowerCamelCase_ : str , lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Union[str, Any] ):
# Change the url to an invalid url so that the connection hangs
__magic_name__ = "https://10.255.255.1"
if kwargs.get("timeout" ) is None:
raise RequestWouldHangIndefinitelyError(
F'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' )
__magic_name__ = timeout
try:
return online_request(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
__magic_name__ = url
__magic_name__ = e.args[0]
__magic_name__ = (max_retry_error.args[0].replace("10.255.255.1" , F'OfflineMock[{url}]' ),)
__magic_name__ = (max_retry_error,)
raise
def raise_connection_error(lowerCamelCase_ : str , lowerCamelCase_ : Any , **lowerCamelCase_ : Union[str, Any] ):
raise requests.ConnectionError("Offline mode is enabled." , request=lowerCamelCase_ )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("requests.Session.send" , lowerCamelCase_ ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.Session.request" , lowerCamelCase_ ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("datasets.config.HF_DATASETS_OFFLINE" , lowerCamelCase_ ):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum." )
@contextmanager
def __snake_case ( *lowerCamelCase_ : Union[str, Any] , **lowerCamelCase_ : int ):
'''simple docstring'''
__magic_name__ = str(Path().resolve() )
with tempfile.TemporaryDirectory(*lowerCamelCase_ , **lowerCamelCase_ ) as tmp_dir:
try:
os.chdir(lowerCamelCase_ )
yield
finally:
os.chdir(lowerCamelCase_ )
@contextmanager
def __snake_case ( ):
'''simple docstring'''
import gc
gc.collect()
__magic_name__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def __snake_case ( ):
'''simple docstring'''
import gc
gc.collect()
__magic_name__ = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : Tuple ):
'''simple docstring'''
return deepcopy(lowerCamelCase_ ).integers(0 , 100 , 10 ).tolist() == deepcopy(lowerCamelCase_ ).integers(0 , 100 , 10 ).tolist()
def __snake_case ( lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
import decorator
from requests.exceptions import HTTPError
def _wrapper(lowerCamelCase_ : Dict , *lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Any ):
try:
return func(*lowerCamelCase_ , **lowerCamelCase_ )
except HTTPError as err:
if str(lowerCamelCase_ ).startswith("500" ) or str(lowerCamelCase_ ).startswith("502" ):
pytest.xfail(str(lowerCamelCase_ ) )
raise err
return decorator.decorator(_wrapper , lowerCamelCase_ )
class UpperCamelCase_ :
"""simple docstring"""
def __init__( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : List[Any] ) -> str:
__magic_name__ = returncode
__magic_name__ = stdout
__magic_name__ = stderr
async def __snake_case ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] ):
'''simple docstring'''
while True:
__magic_name__ = await stream.readline()
if line:
callback(lowerCamelCase_ )
else:
break
async def __snake_case ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Any=None , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : Optional[Any]=False ):
'''simple docstring'''
if echo:
print("\nRunning: " , " ".join(lowerCamelCase_ ) )
__magic_name__ = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase_ , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
__magic_name__ = []
__magic_name__ = []
def tee(lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : List[str]="" ):
__magic_name__ = line.decode("utf-8" ).rstrip()
sink.append(lowerCamelCase_ )
if not quiet:
print(lowerCamelCase_ , lowerCamelCase_ , file=lowerCamelCase_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda lowerCamelCase_ : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stdout , label="stdout:" ) ),
_read_stream(p.stderr , lambda lowerCamelCase_ : tee(lowerCamelCase_ , lowerCamelCase_ , sys.stderr , label="stderr:" ) ),
] , timeout=lowerCamelCase_ , )
return _RunOutput(await p.wait() , lowerCamelCase_ , lowerCamelCase_ )
def __snake_case ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : int=None , lowerCamelCase_ : Union[str, Any]=180 , lowerCamelCase_ : List[str]=False , lowerCamelCase_ : List[str]=True ):
'''simple docstring'''
__magic_name__ = asyncio.get_event_loop()
__magic_name__ = loop.run_until_complete(
_stream_subprocess(lowerCamelCase_ , env=lowerCamelCase_ , stdin=lowerCamelCase_ , timeout=lowerCamelCase_ , quiet=lowerCamelCase_ , echo=lowerCamelCase_ ) )
__magic_name__ = " ".join(lowerCamelCase_ )
if result.returncode > 0:
__magic_name__ = "\n".join(result.stderr )
raise RuntimeError(
F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
F'The combined stderr from workers follows:\n{stderr}' )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F'\'{cmd_str}\' produced no output.' )
return result
def __snake_case ( ):
'''simple docstring'''
__magic_name__ = os.environ.get("PYTEST_XDIST_WORKER" , "gw0" )
__magic_name__ = re.sub(R"^gw" , "" , lowerCamelCase_ , 0 , re.M )
return int(lowerCamelCase_ )
def __snake_case ( ):
'''simple docstring'''
__magic_name__ = 2_9500
__magic_name__ = pytest_xdist_worker_id()
return port + uniq_delta
| 664 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ : Optional[Any] ={
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : int =['LongformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Dict =[
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Tuple =[
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
__magic_name__ : int =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 664 | 1 |
"""simple docstring"""
from __future__ import annotations
from math import ceil, floor, sqrt
def snake_case ( A__ = 2_00_00_00 ):
UpperCAmelCase_ : list[int] = [0]
UpperCAmelCase_ : int
for idx in range(1 ,ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
UpperCAmelCase_ : int = 0
# the area corresponding to the grid that gives the product closest to target
UpperCAmelCase_ : int = 0
# an estimate of b, using the quadratic formula
UpperCAmelCase_ : float
# the largest integer less than b_estimate
UpperCAmelCase_ : int
# the largest integer less than b_estimate
UpperCAmelCase_ : int
# the triangle number corresponding to b_floor
UpperCAmelCase_ : int
# the triangle number corresponding to b_ceil
UpperCAmelCase_ : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] ,1 ):
UpperCAmelCase_ : Union[str, Any] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
UpperCAmelCase_ : Optional[Any] = floor(A__ )
UpperCAmelCase_ : str = ceil(A__ )
UpperCAmelCase_ : List[str] = triangle_numbers[b_floor]
UpperCAmelCase_ : Optional[Any] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
UpperCAmelCase_ : int = triangle_b_first_guess * triangle_a
UpperCAmelCase_ : Union[str, Any] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
UpperCAmelCase_ : Tuple = triangle_b_second_guess * triangle_a
UpperCAmelCase_ : Any = idx_a * b_ceil
return area
if __name__ == "__main__":
print(f'{solution() = }')
| 463 |
"""simple docstring"""
from itertools import count
def snake_case ( A__ = 50 ):
UpperCAmelCase_ : Any = [1] * min_block_length
for n in count(A__ ):
fill_count_functions.append(1 )
for block_length in range(A__ ,n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_00_00_00:
break
return n
if __name__ == "__main__":
print(f'{solution() = }')
| 463 | 1 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
UpperCamelCase__ = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class a ( unittest.TestCase ):
@classmethod
def __snake_case ( cls ):
UpperCAmelCase__ : Optional[Any] = TOKEN
HfFolder.save_token(UpperCamelCase_ )
@classmethod
def __snake_case ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-model-flax' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
except HTTPError:
pass
def __snake_case ( self ):
UpperCAmelCase__ : Optional[int] = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase__ : List[str] = FlaxBertModel(UpperCamelCase_ )
model.push_to_hub('test-model-flax' , use_auth_token=self._token )
UpperCAmelCase__ : Any = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
UpperCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase__ : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase_ , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='test-model-flax' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCamelCase_ , repo_id='test-model-flax' , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
UpperCAmelCase__ : Dict = FlaxBertModel.from_pretrained(F'''{USER}/test-model-flax''' )
UpperCAmelCase__ : Any = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase_ , 1E-3 , msg=F'''{key} not identical''' )
def __snake_case ( self ):
UpperCAmelCase__ : Dict = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
UpperCAmelCase__ : Dict = FlaxBertModel(UpperCamelCase_ )
model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
UpperCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
UpperCAmelCase__ : Tuple = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase__ : Any = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase_ , 1E-3 , msg=F'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
UpperCamelCase_ , repo_id='valid_org/test-model-flax-org' , push_to_hub=UpperCamelCase_ , use_auth_token=self._token )
UpperCAmelCase__ : Any = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
UpperCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) )
UpperCAmelCase__ : int = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
UpperCAmelCase__ : List[str] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCamelCase_ , 1E-3 , msg=F'''{key} not identical''' )
def lowerCamelCase ( _snake_case ,_snake_case ):
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[int] = flatten_dict(modela.params )
UpperCAmelCase__ : int = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
UpperCAmelCase__ : str = False
return models_are_equal
@require_flax
class a ( unittest.TestCase ):
def __snake_case ( self ):
UpperCAmelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
UpperCAmelCase__ : Optional[int] = FlaxBertModel(UpperCamelCase_ )
UpperCAmelCase__ : Dict = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) )
with self.assertRaises(UpperCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(UpperCamelCase_ , subfolder=UpperCamelCase_ )
self.assertTrue(check_models_equal(UpperCamelCase_ , UpperCamelCase_ ) )
def __snake_case ( self ):
UpperCAmelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
UpperCAmelCase__ : Optional[Any] = FlaxBertModel(UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , max_shard_size='10KB' )
with self.assertRaises(UpperCamelCase_ ):
UpperCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(UpperCamelCase_ )
UpperCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(UpperCamelCase_ , subfolder=UpperCamelCase_ )
self.assertTrue(check_models_equal(UpperCamelCase_ , UpperCamelCase_ ) )
def __snake_case ( self ):
UpperCAmelCase__ : int = 'bert'
UpperCAmelCase__ : Dict = 'hf-internal-testing/tiny-random-bert-subfolder'
with self.assertRaises(UpperCamelCase_ ):
UpperCAmelCase__ : Any = FlaxBertModel.from_pretrained(UpperCamelCase_ )
UpperCAmelCase__ : Dict = FlaxBertModel.from_pretrained(UpperCamelCase_ , subfolder=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def __snake_case ( self ):
UpperCAmelCase__ : str = 'bert'
UpperCAmelCase__ : Optional[int] = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
with self.assertRaises(UpperCamelCase_ ):
UpperCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(UpperCamelCase_ , subfolder=UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
| 110 |
def a__ ( lowercase__ = 2_0_0 ):
'''simple docstring'''
UpperCAmelCase_ =[1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
UpperCAmelCase_ =[0] * (pence + 1)
UpperCAmelCase_ =1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(lowercase__ , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
| 54 | 0 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __lowerCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
a_: List[Any] = """"""
a_: str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
a_: str = None # compression type in fsspec. ex: "gzip"
a_: str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : List[str] , lowerCamelCase_ : str = "" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[dict] = None , **lowerCamelCase_ : List[Any] ):
super().__init__(self , **lowerCamelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
_lowerCAmelCase =fsspec.open(
lowerCamelCase_ , mode="""rb""" , protocol=lowerCamelCase_ , compression=self.compression , client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
_lowerCAmelCase =os.path.basename(self.file.path.split("""::""" )[0] )
_lowerCAmelCase =(
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
_lowerCAmelCase =None
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , lowerCamelCase_ : Optional[int] ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowerCamelCase_ ).lstrip("""/""" )
def lowerCAmelCase__ ( self : Optional[Any] ):
if self.dir_cache is None:
_lowerCAmelCase ={**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
_lowerCAmelCase ={f["""name"""]: f}
def lowerCAmelCase__ ( self : str , lowerCamelCase_ : str ):
return self.file.open().read()
def lowerCAmelCase__ ( self : Any , lowerCamelCase_ : str , lowerCamelCase_ : str = "rb" , lowerCamelCase_ : Optional[Any]=None , lowerCamelCase_ : Dict=True , lowerCamelCase_ : str=None , **lowerCamelCase_ : List[Any] , ):
_lowerCAmelCase =self._strip_protocol(lowerCamelCase_ )
if mode != "rb":
raise ValueError(F"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'" )
return self.file.open()
class __lowerCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
a_: Optional[int] = """bz2"""
a_: Optional[Any] = """bz2"""
a_: Any = """.bz2"""
class __lowerCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
a_: List[str] = """gzip"""
a_: List[Any] = """gzip"""
a_: Optional[int] = """.gz"""
class __lowerCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
a_: int = """lz4"""
a_: int = """lz4"""
a_: Union[str, Any] = """.lz4"""
class __lowerCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
a_: Any = """xz"""
a_: Optional[Any] = """xz"""
a_: Tuple = """.xz"""
class __lowerCamelCase ( lowerCamelCase_ ):
"""simple docstring"""
a_: Union[str, Any] = """zstd"""
a_: Optional[int] = """zstd"""
a_: Union[str, Any] = """.zst"""
def __init__( self : List[str] , lowerCamelCase_ : str , lowerCamelCase_ : str = "rb" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[dict] = None , lowerCamelCase_ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase_ : Optional[Any] , ):
super().__init__(
fo=lowerCamelCase_ , mode=lowerCamelCase_ , target_protocol=lowerCamelCase_ , target_options=lowerCamelCase_ , block_size=lowerCamelCase_ , **lowerCamelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_lowerCAmelCase =self.file.__enter__
class __lowerCamelCase :
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase_ : int ):
_lowerCAmelCase =file_
def __enter__( self : Optional[Any] ):
self._file.__enter__()
return self
def __exit__( self : Optional[Any] , *lowerCamelCase_ : str , **lowerCamelCase_ : List[Any] ):
self._file.__exit__(*lowerCamelCase_ , **lowerCamelCase_ )
def __iter__( self : Union[str, Any] ):
return iter(self._file )
def lowerCAmelCase__ ( self : str ):
return next(self._file )
def __getattr__( self : Optional[int] , lowerCamelCase_ : Any ):
return getattr(self._file , lowerCamelCase_ )
def fixed_enter(*lowerCamelCase_ : int , **lowerCamelCase_ : List[Any] ):
return WrappedFile(_enter(*lowerCamelCase_ , **lowerCamelCase_ ) )
_lowerCAmelCase =fixed_enter
| 710 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( lowercase__ : Any , lowercase__ : List[str] , lowercase__ : List[str] ):
'''simple docstring'''
_lowerCAmelCase =BertConfig.from_json_file(lowercase__ )
print(f"Building PyTorch model from configuration: {config}" )
_lowerCAmelCase =BertForPreTraining(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 149 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_A = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
_A = 25_00_04
_A = 25_00_20
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = MBartaaTokenizer
SCREAMING_SNAKE_CASE = MBartaaTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _a (self ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase__ : Optional[int] = MBartaaTokenizer(_lowerCamelCase , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = """<s>"""
UpperCAmelCase__ : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ) , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(_lowerCamelCase ) , 1054 )
def _a (self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = MBartaaTokenizer(_lowerCamelCase , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=_lowerCamelCase )
UpperCAmelCase__ : List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(_lowerCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
UpperCAmelCase__ : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
_lowerCamelCase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
UpperCAmelCase__ : Tuple = tokenizer.convert_tokens_to_ids(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
UpperCAmelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(_lowerCamelCase )
self.assertListEqual(
_lowerCamelCase , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = {"""input_ids""": [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def _a (self ):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
UpperCAmelCase__ : Union[str, Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
UpperCAmelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : Any = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase__ : str = tokenizer_r.save_pretrained(_lowerCamelCase )
UpperCAmelCase__ : Tuple = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
UpperCAmelCase__ : List[str] = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(_lowerCamelCase , _lowerCamelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Dict = tokenizer_r.from_pretrained(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=True
UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase__ : List[Any] = tokenizer_r.save_pretrained(_lowerCamelCase , legacy_format=_lowerCamelCase )
UpperCAmelCase__ : str = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(_lowerCamelCase , _lowerCamelCase )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Tuple = tokenizer_r.from_pretrained(_lowerCamelCase )
UpperCAmelCase__ : Optional[int] = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
# Save tokenizer rust, legacy_format=False
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : Dict = tokenizer_r.save_pretrained(_lowerCamelCase , legacy_format=_lowerCamelCase )
UpperCAmelCase__ : int = tokenizer_p.save_pretrained(_lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCAmelCase__ : Dict = tokenizer_r.from_pretrained(_lowerCamelCase )
UpperCAmelCase__ : Dict = tokenizer_p.from_pretrained(_lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_lowerCamelCase , _lowerCamelCase ) )
shutil.rmtree(_lowerCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'facebook/mbart-large-50-one-to-many-mmt'
SCREAMING_SNAKE_CASE = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
SCREAMING_SNAKE_CASE = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
SCREAMING_SNAKE_CASE = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def _a (cls ):
"""simple docstring"""
UpperCAmelCase__ : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
UpperCAmelCase__ : Dict = 1
return cls
def _a (self ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250038 )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
self.assertIn(_lowerCamelCase , self.tokenizer.all_special_ids )
UpperCAmelCase__ : Any = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
UpperCAmelCase__ : Dict = self.tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
UpperCAmelCase__ : Any = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Dict = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , _lowerCamelCase )
UpperCAmelCase__ : Optional[int] = 10
UpperCAmelCase__ : Dict = self.tokenizer(_lowerCamelCase , max_length=_lowerCamelCase , truncation=_lowerCamelCase ).input_ids[0]
self.assertEqual(ids[0] , _lowerCamelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(_lowerCamelCase ) , _lowerCamelCase )
def _a (self ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250053, 250001] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : str = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_lowerCamelCase )
UpperCAmelCase__ : int = MBartaaTokenizer.from_pretrained(_lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _lowerCamelCase )
@require_torch
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_lowerCamelCase , return_tensors="""pt""" )
UpperCAmelCase__ : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
UpperCAmelCase__ : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
UpperCAmelCase__ : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _lowerCamelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = self.tokenizer(self.src_text , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=3 , return_tensors="""pt""" )
UpperCAmelCase__ : str = self.tokenizer(
text_target=self.tgt_text , padding=_lowerCamelCase , truncation=_lowerCamelCase , max_length=10 , return_tensors="""pt""" )
UpperCAmelCase__ : Union[str, Any] = targets["""input_ids"""]
UpperCAmelCase__ : Optional[int] = shift_tokens_right(_lowerCamelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250004, 62, 3034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250001,
} , )
| 182 |
"""simple docstring"""
import requests
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> None:
UpperCAmelCase__ : List[str] = {"""Content-Type""": """application/json"""}
UpperCAmelCase__ : List[str] = requests.post(lowerCAmelCase , json={"""text""": message_body} , headers=lowerCAmelCase )
if response.status_code != 2_00:
UpperCAmelCase__ : str = (
"""Request to slack returned an error """
F"""{response.status_code}, the response is:\n{response.text}"""
)
raise ValueError(lowerCAmelCase )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 182 | 1 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
snake_case_ = 0
snake_case_ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
snake_case_ = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
snake_case_ = tuple[int, int]
class _lowercase :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
A : Union[str, Any] = pos_x
A : Dict = pos_y
A : Tuple = (pos_y, pos_x)
A : Dict = goal_x
A : List[Any] = goal_y
A : Dict = g_cost
A : Optional[int] = parent
A : Optional[int] = self.calculate_heuristic()
A : Optional[int] = self.g_cost + self.h_cost
def snake_case ( self ):
A : Dict = self.pos_x - self.goal_x
A : Dict = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCAmelCase_ ) + abs(lowerCAmelCase_ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self , _UpperCAmelCase ):
return self.f_cost < other.f_cost
class _lowercase :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
A : int = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , lowerCAmelCase_ )
A : Dict = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , lowerCAmelCase_ )
A : Optional[Any] = [self.start]
A : List[Any] = []
A : str = False
def snake_case ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
A : List[Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCAmelCase_ )
self.closed_nodes.append(lowerCAmelCase_ )
A : Optional[Any] = self.get_successors(lowerCAmelCase_ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
A : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase_ )
else:
self.open_nodes.append(lowerCAmelCase_ )
return [self.start.pos]
def snake_case ( self , _UpperCAmelCase ):
A : str = []
for action in delta:
A : List[Any] = parent.pos_x + action[1]
A : Optional[int] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , lowerCAmelCase_ , ) )
return successors
def snake_case ( self , _UpperCAmelCase ):
A : Optional[int] = node
A : Any = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
A : List[str] = current_node.parent
path.reverse()
return path
class _lowercase :
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
A : Optional[int] = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
A : str = AStar(lowerCAmelCase_ , lowerCAmelCase_ )
A : str = False
def snake_case ( self ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
A : Any = self.fwd_astar.open_nodes.pop(0 )
A : Any = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCAmelCase_ , lowerCAmelCase_ )
self.fwd_astar.closed_nodes.append(lowerCAmelCase_ )
self.bwd_astar.closed_nodes.append(lowerCAmelCase_ )
A : Optional[Any] = current_bwd_node
A : Dict = current_fwd_node
A : Tuple = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase_ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase_ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCAmelCase_ )
else:
# retrieve the best current path
A : Union[str, Any] = astar.open_nodes.pop(
astar.open_nodes.index(lowerCAmelCase_ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCAmelCase_ )
else:
astar.open_nodes.append(lowerCAmelCase_ )
return [self.fwd_astar.start.pos]
def snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
A : Union[str, Any] = self.fwd_astar.retrace_path(lowerCAmelCase_ )
A : Dict = self.bwd_astar.retrace_path(lowerCAmelCase_ )
bwd_path.pop()
bwd_path.reverse()
A : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
snake_case_ = (0, 0)
snake_case_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
snake_case_ = time.time()
snake_case_ = AStar(init, goal)
snake_case_ = a_star.search()
snake_case_ = time.time() - start_time
print(f'''AStar execution time = {end_time:f} seconds''')
snake_case_ = time.time()
snake_case_ = BidirectionalAStar(init, goal)
snake_case_ = time.time() - bd_start_time
print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
snake_case_ = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
snake_case_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 537 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: Optional[Any] = ["""pixel_values"""]
def __init__( self , __a = True , __a = None , __a = PILImageResampling.BILINEAR , __a = True , __a = 1 / 255 , __a = True , __a = None , __a = True , **__a , ):
"""simple docstring"""
super().__init__(**__a )
A__ = size if size is not None else {'shortest_edge': 224}
A__ = get_size_dict(__a , default_to_square=__a )
A__ = crop_size if crop_size is not None else {'height': 256, 'width': 256}
A__ = get_size_dict(__a , param_name='crop_size' )
A__ = do_resize
A__ = size
A__ = resample
A__ = do_rescale
A__ = rescale_factor
A__ = do_center_crop
A__ = crop_size
A__ = do_flip_channel_order
def _UpperCAmelCase ( self , __a , __a , __a = PIL.Image.BILINEAR , __a = None , **__a , ):
"""simple docstring"""
A__ = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' )
A__ = get_resize_output_image_size(__a , size=size['shortest_edge'] , default_to_square=__a )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def _UpperCAmelCase ( self , __a , __a , __a = None , **__a , ):
"""simple docstring"""
A__ = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
return center_crop(__a , size=(size['height'], size['width']) , data_format=__a , **__a )
def _UpperCAmelCase ( self , __a , __a , __a = None , **__a , ):
"""simple docstring"""
return rescale(__a , scale=__a , data_format=__a , **__a )
def _UpperCAmelCase ( self , __a , __a = None ):
"""simple docstring"""
return flip_channel_order(__a , data_format=__a )
def _UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ):
"""simple docstring"""
A__ = do_resize if do_resize is not None else self.do_resize
A__ = resample if resample is not None else self.resample
A__ = do_rescale if do_rescale is not None else self.do_rescale
A__ = rescale_factor if rescale_factor is not None else self.rescale_factor
A__ = do_center_crop if do_center_crop is not None else self.do_center_crop
A__ = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
A__ = size if size is not None else self.size
A__ = get_size_dict(__a , default_to_square=__a )
A__ = crop_size if crop_size is not None else self.crop_size
A__ = get_size_dict(__a , param_name='crop_size' )
A__ = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
# All transformations expect numpy arrays.
A__ = [to_numpy_array(__a ) for image in images]
if do_resize:
A__ = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
A__ = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
A__ = [self.rescale(image=__a , scale=__a ) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
A__ = [self.flip_channel_order(image=__a ) for image in images]
A__ = [to_channel_dimension_format(__a , __a ) for image in images]
A__ = {'pixel_values': images}
return BatchFeature(data=__a , tensor_type=__a )
def _UpperCAmelCase ( self , __a , __a = None ):
"""simple docstring"""
A__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__a ) != len(__a ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(__a ):
A__ = target_sizes.numpy()
A__ = []
for idx in range(len(__a ) ):
A__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=__a )
A__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__a )
else:
A__ = logits.argmax(dim=1 )
A__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 260 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class snake_case_ ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: torch.FloatTensor
SCREAMING_SNAKE_CASE_: Optional[torch.FloatTensor] = None
def __lowerCamelCase ( lowerCAmelCase__ ,lowerCAmelCase__=0.9_9_9 ,lowerCAmelCase__="cosine" ,):
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase__ ):
return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase__ ):
return math.exp(t * -1_2.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
A__ = []
for i in range(lowerCAmelCase__ ):
A__ = i / num_diffusion_timesteps
A__ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) ,lowerCAmelCase__ ) )
return torch.tensor(lowerCAmelCase__ ,dtype=torch.floataa )
class snake_case_ ( _lowerCamelCase , _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_: List[Any] = 1
@register_to_config
def __init__( self , __a = 1000 , __a = 0.0001 , __a = 0.02 , __a = "linear" , __a = None , __a = True , __a = True , __a = 0 , __a = "epsilon" , __a = 1.0 , **__a , ):
"""simple docstring"""
if kwargs.get('set_alpha_to_one' , __a ) is not None:
A__ = (
'The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'
)
deprecate('set_alpha_to_one' , '1.0.0' , __a , standard_warn=__a )
A__ = kwargs['set_alpha_to_one']
if trained_betas is not None:
A__ = torch.tensor(__a , dtype=torch.floataa )
elif beta_schedule == "linear":
A__ = torch.linspace(__a , __a , __a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
A__ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
A__ = betas_for_alpha_bar(__a )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
A__ = 1.0 - self.betas
A__ = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
A__ = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
A__ = 1.0
# setable values
A__ = None
A__ = torch.from_numpy(np.arange(0 , __a ).copy().astype(np.intaa ) )
def _UpperCAmelCase ( self , __a , __a = None ):
"""simple docstring"""
return sample
def _UpperCAmelCase ( self , __a , __a = None ):
"""simple docstring"""
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
A__ = num_inference_steps
A__ = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
A__ = (np.arange(0 , __a ) * step_ratio).round().copy().astype(np.intaa )
A__ = torch.from_numpy(__a ).to(__a )
self.timesteps += self.config.steps_offset
def _UpperCAmelCase ( self , __a , __a , __a , __a = 0.0 , __a = False , __a = None , __a = True , ):
"""simple docstring"""
A__ = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
A__ = self.alphas_cumprod[timestep]
A__ = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
A__ = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
A__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
A__ = model_output
elif self.config.prediction_type == "sample":
A__ = model_output
A__ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
A__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
A__ = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
' `v_prediction`' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
A__ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A__ = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A__ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__a , pred_original_sample=__a )
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
| 260 | 1 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any]=13 , __magic_name__ : Optional[int]=7 , __magic_name__ : Dict=True , __magic_name__ : List[str]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=99 , __magic_name__ : Tuple=32 , __magic_name__ : Dict=5 , __magic_name__ : List[Any]=4 , __magic_name__ : str=37 , __magic_name__ : List[str]="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Dict=0.1 , __magic_name__ : Any=512 , __magic_name__ : Optional[Any]=16 , __magic_name__ : Union[str, Any]=2 , __magic_name__ : Optional[int]=0.02 , __magic_name__ : Dict=4 , ) -> List[Any]:
lowerCamelCase_ : Optional[Any] = parent
lowerCamelCase_ : List[Any] = batch_size
lowerCamelCase_ : Dict = seq_length
lowerCamelCase_ : str = is_training
lowerCamelCase_ : Dict = use_attention_mask
lowerCamelCase_ : Dict = use_token_type_ids
lowerCamelCase_ : Union[str, Any] = use_labels
lowerCamelCase_ : str = vocab_size
lowerCamelCase_ : str = hidden_size
lowerCamelCase_ : int = num_hidden_layers
lowerCamelCase_ : Union[str, Any] = num_attention_heads
lowerCamelCase_ : Tuple = intermediate_size
lowerCamelCase_ : Union[str, Any] = hidden_act
lowerCamelCase_ : Optional[Any] = hidden_dropout_prob
lowerCamelCase_ : str = attention_probs_dropout_prob
lowerCamelCase_ : Dict = max_position_embeddings
lowerCamelCase_ : str = type_vocab_size
lowerCamelCase_ : int = type_sequence_label_size
lowerCamelCase_ : List[Any] = initializer_range
lowerCamelCase_ : Union[str, Any] = num_choices
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Tuple = None
if self.use_attention_mask:
lowerCamelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : Dict = None
if self.use_token_type_ids:
lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ : Dict = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__magic_name__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
lowerCamelCase_ : str = self.prepare_config_and_inputs()
lowerCamelCase_ : Optional[int] = config_and_inputs
lowerCamelCase_ : List[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
lowerCamelCase_ : int = self.prepare_config_and_inputs()
lowerCamelCase_ : str = config_and_inputs
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = True
lowerCamelCase = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
lowerCamelCase_ : str = FlaxBertModelTester(self )
@slow
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
# Only check this for base model, not necessary for all model classes.
# This will also help speed-up tests.
lowerCamelCase_ : Optional[Any] = FlaxBertModel.from_pretrained("bert-base-cased" )
lowerCamelCase_ : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(__magic_name__ )
| 716 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( __A , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase = LayoutLMTokenizer
lowerCamelCase = LayoutLMTokenizerFast
lowerCamelCase = True
lowerCamelCase = True
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
super().setUp()
lowerCamelCase_ : int = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , **__magic_name__ : Dict ) -> List[str]:
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] ) -> Union[str, Any]:
lowerCamelCase_ : str = "UNwant\u00E9d,running"
lowerCamelCase_ : List[Any] = "unwanted, running"
return input_text, output_text
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
lowerCamelCase_ : Dict = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : List[Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__magic_name__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [7, 4, 5, 10, 8, 9] )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
pass
| 253 | 0 |
'''simple docstring'''
import requests
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> None:
"""simple docstring"""
snake_case_ : Union[str, Any] = {"Content-Type": "application/json"}
snake_case_ : str = requests.post(__magic_name__ ,json={"text": message_body} ,headers=__magic_name__ )
if response.status_code != 200:
snake_case_ : List[str] = (
"Request to slack returned an error "
F'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(__magic_name__ )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int:
'''simple docstring'''
snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase__ )
def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _A ( self :int ) -> List[Any]:
'''simple docstring'''
return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
"""simple docstring"""
a__ = 42
a__ = 42
a__ = 0
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple:
'''simple docstring'''
snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized
snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while'''
F''' destination module has {len(lowerCAmelCase__ )}.''' )
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]:
"""simple docstring"""
print(F'''Converting {name}...''' )
with torch.no_grad():
snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval()
snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ )
snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) )
module_transfer(__magic_name__ )
assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one."
snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}'''
print(__magic_name__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
# we can use the convnext one
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
print(F'''Pushed {checkpoint_name}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple:
"""simple docstring"""
snake_case_ : List[str] = "imagenet-1k-id2label.json"
snake_case_ : Optional[Any] = 1000
snake_case_ : List[Any] = (1, num_labels)
snake_case_ : Optional[Any] = "huggingface/label-files"
snake_case_ : Dict = num_labels
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
snake_case_ : Optional[int] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__lowerCamelCase : Tuple = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 653 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class _a ( __SCREAMING_SNAKE_CASE ):
_lowercase : str = '''openai-gpt'''
_lowercase : Any = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self: List[str] , UpperCamelCase_: Any=40_478 , UpperCamelCase_: Any=512 , UpperCamelCase_: str=768 , UpperCamelCase_: List[Any]=12 , UpperCamelCase_: Union[str, Any]=12 , UpperCamelCase_: Optional[Any]="gelu" , UpperCamelCase_: Optional[int]=0.1 , UpperCamelCase_: Dict=0.1 , UpperCamelCase_: List[str]=0.1 , UpperCamelCase_: Optional[int]=1E-5 , UpperCamelCase_: Any=0.02 , UpperCamelCase_: Dict="cls_index" , UpperCamelCase_: str=True , UpperCamelCase_: int=None , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Tuple=0.1 , **UpperCamelCase_: List[Any] , ) -> List[str]:
"""simple docstring"""
lowercase__ = vocab_size
lowercase__ = n_positions
lowercase__ = n_embd
lowercase__ = n_layer
lowercase__ = n_head
lowercase__ = afn
lowercase__ = resid_pdrop
lowercase__ = embd_pdrop
lowercase__ = attn_pdrop
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_range
lowercase__ = summary_type
lowercase__ = summary_use_proj
lowercase__ = summary_activation
lowercase__ = summary_first_dropout
lowercase__ = summary_proj_to_labels
super().__init__(**_a )
| 705 |
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCAmelCase = get_logger(__name__)
class _a :
def __init__( self: Dict , UpperCamelCase_: Optional[str] = None ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = (
os.path.join(UpperCamelCase_ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
lowercase__ = Extractor
def lowerCamelCase_ ( self: Dict , UpperCamelCase_: str ) -> str:
"""simple docstring"""
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path"
lowercase__ = os.path.abspath(UpperCamelCase_ )
return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase_ ) )
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: bool ) -> bool:
"""simple docstring"""
return force_extract or (
not os.path.isfile(UpperCamelCase_ ) and not (os.path.isdir(UpperCamelCase_ ) and os.listdir(UpperCamelCase_ ))
)
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: str , UpperCamelCase_: bool = False ) -> str:
"""simple docstring"""
lowercase__ = self.extractor.infer_extractor_format(UpperCamelCase_ )
if not extractor_format:
return input_path
lowercase__ = self._get_output_path(UpperCamelCase_ )
if self._do_extract(UpperCamelCase_ , UpperCamelCase_ ):
self.extractor.extract(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return output_path
class _a ( UpperCamelCase__ ):
@classmethod
@abstractmethod
def lowerCamelCase_ ( cls: str , UpperCamelCase_: Union[Path, str] , **UpperCamelCase_: Any ) -> bool:
"""simple docstring"""
...
@staticmethod
@abstractmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
...
class _a ( UpperCamelCase__ , UpperCamelCase__ ):
_lowercase : List[bytes] = []
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: int ) -> Union[str, Any]:
"""simple docstring"""
with open(UpperCamelCase_ , '''rb''' ) as f:
return f.read(UpperCamelCase_ )
@classmethod
def lowerCamelCase_ ( cls: int , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: bytes = b"" ) -> bool:
"""simple docstring"""
if not magic_number:
lowercase__ = max(len(UpperCamelCase_ ) for cls_magic_number in cls.magic_numbers )
try:
lowercase__ = cls.read_magic_number(UpperCamelCase_ , UpperCamelCase_ )
except OSError:
return False
return any(magic_number.startswith(UpperCamelCase_ ) for cls_magic_number in cls.magic_numbers )
class _a ( UpperCamelCase__ ):
@classmethod
def lowerCamelCase_ ( cls: Dict , UpperCamelCase_: Union[Path, str] , **UpperCamelCase_: Dict ) -> bool:
"""simple docstring"""
return tarfile.is_tarfile(UpperCamelCase_ )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Tuple , UpperCamelCase_: Union[str, Any] ) -> List[Any]:
"""simple docstring"""
def resolved(UpperCamelCase_: str ) -> str:
return os.path.realpath(os.path.abspath(UpperCamelCase_ ) )
def badpath(UpperCamelCase_: str , UpperCamelCase_: str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) ).startswith(UpperCamelCase_ )
def badlink(UpperCamelCase_: Any , UpperCamelCase_: str ) -> bool:
# Links are interpreted relative to the directory containing the link
lowercase__ = resolved(os.path.join(UpperCamelCase_ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=UpperCamelCase_ )
lowercase__ = resolved(UpperCamelCase_ )
for finfo in members:
if badpath(finfo.name , UpperCamelCase_ ):
logger.error(f'Extraction of {finfo.name} is blocked (illegal path)' )
elif finfo.issym() and badlink(UpperCamelCase_ , UpperCamelCase_ ):
logger.error(f'Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}' )
elif finfo.islnk() and badlink(UpperCamelCase_ , UpperCamelCase_ ):
logger.error(f'Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}' )
else:
yield finfo
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
lowercase__ = tarfile.open(UpperCamelCase_ )
tar_file.extractall(UpperCamelCase_ , members=TarExtractor.safemembers(UpperCamelCase_ , UpperCamelCase_ ) )
tar_file.close()
class _a ( UpperCamelCase__ ):
_lowercase : Dict = [b'''\x1F\x8B''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
with gzip.open(UpperCamelCase_ , '''rb''' ) as gzip_file:
with open(UpperCamelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : List[str] = [
b'''PK\x03\x04''',
b'''PK\x05\x06''', # empty archive
b'''PK\x07\x08''', # spanned archive
]
@classmethod
def lowerCamelCase_ ( cls: str , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: bytes = b"" ) -> bool:
"""simple docstring"""
if super().is_extractable(UpperCamelCase_ , magic_number=UpperCamelCase_ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(UpperCamelCase_ , '''rb''' ) as fp:
lowercase__ = _EndRecData(UpperCamelCase_ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
lowercase__ = fp.read(UpperCamelCase_ ) # CD is where we expect it to be
if len(UpperCamelCase_ ) == sizeCentralDir:
lowercase__ = struct.unpack(UpperCamelCase_ , UpperCamelCase_ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
with zipfile.ZipFile(UpperCamelCase_ , '''r''' ) as zip_file:
zip_file.extractall(UpperCamelCase_ )
zip_file.close()
class _a ( UpperCamelCase__ ):
_lowercase : int = [b'''\xFD\x37\x7A\x58\x5A\x00''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
with lzma.open(UpperCamelCase_ ) as compressed_file:
with open(UpperCamelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : Union[str, Any] = [b'''Rar!\x1a\x07\x00''', b'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
if not config.RARFILE_AVAILABLE:
raise ImportError('''Please pip install rarfile''' )
import rarfile
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
lowercase__ = rarfile.RarFile(UpperCamelCase_ )
rf.extractall(UpperCamelCase_ )
rf.close()
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = [b'''\x28\xb5\x2F\xFD''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
if not config.ZSTANDARD_AVAILABLE:
raise ImportError('''Please pip install zstandard''' )
import zstandard as zstd
lowercase__ = zstd.ZstdDecompressor()
with open(UpperCamelCase_ , '''rb''' ) as ifh, open(UpperCamelCase_ , '''wb''' ) as ofh:
dctx.copy_stream(UpperCamelCase_ , UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : List[Any] = [b'''\x42\x5A\x68''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
with bza.open(UpperCamelCase_ , '''rb''' ) as compressed_file:
with open(UpperCamelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : str = [b'''\x37\x7A\xBC\xAF\x27\x1C''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
if not config.PY7ZR_AVAILABLE:
raise ImportError('''Please pip install py7zr''' )
import pyazr
os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
with pyazr.SevenZipFile(UpperCamelCase_ , '''r''' ) as archive:
archive.extractall(UpperCamelCase_ )
class _a ( UpperCamelCase__ ):
_lowercase : str = [b'''\x04\x22\x4D\x18''']
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] ) -> None:
"""simple docstring"""
if not config.LZ4_AVAILABLE:
raise ImportError('''Please pip install lz4''' )
import lza.frame
with lza.frame.open(UpperCamelCase_ , '''rb''' ) as compressed_file:
with open(UpperCamelCase_ , '''wb''' ) as extracted_file:
shutil.copyfileobj(UpperCamelCase_ , UpperCamelCase_ )
class _a :
# Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
_lowercase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCamelCase_ ( cls: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return max(
len(UpperCamelCase_ )
for extractor in cls.extractors.values()
if issubclass(UpperCamelCase_ , UpperCamelCase_ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCamelCase_ ( UpperCamelCase_: Union[Path, str] , UpperCamelCase_: int ) -> Optional[Any]:
"""simple docstring"""
try:
return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase_ , magic_number_length=UpperCamelCase_ )
except OSError:
return b""
@classmethod
def lowerCamelCase_ ( cls: List[Any] , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: bool = False ) -> bool:
"""simple docstring"""
warnings.warn(
'''Method \'is_extractable\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'infer_extractor_format\' instead.''' , category=UpperCamelCase_ , )
lowercase__ = cls.infer_extractor_format(UpperCamelCase_ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCamelCase_ ( cls: List[Any] , UpperCamelCase_: Union[Path, str] ) -> str: # <Added version="2.4.0"/>
"""simple docstring"""
lowercase__ = cls._get_magic_number_max_length()
lowercase__ = cls._read_magic_number(UpperCamelCase_ , UpperCamelCase_ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(UpperCamelCase_ , magic_number=UpperCamelCase_ ):
return extractor_format
@classmethod
def lowerCamelCase_ ( cls: Optional[int] , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Union[Path, str] , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[BaseExtractor] = "deprecated" , ) -> None:
"""simple docstring"""
os.makedirs(os.path.dirname(UpperCamelCase_ ) , exist_ok=UpperCamelCase_ )
# Prevent parallel extractions
lowercase__ = str(Path(UpperCamelCase_ ).with_suffix('''.lock''' ) )
with FileLock(UpperCamelCase_ ):
shutil.rmtree(UpperCamelCase_ , ignore_errors=UpperCamelCase_ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(UpperCamelCase_ , UpperCamelCase_ ): # passed as positional arg
warnings.warn(
'''Parameter \'extractor\' was deprecated in version 2.4.0 and will be removed in 3.0.0. '''
'''Use \'extractor_format\' instead.''' , category=UpperCamelCase_ , )
lowercase__ = extractor if extractor != '''deprecated''' else extractor_format
else:
lowercase__ = cls.extractors[extractor_format]
return extractor.extract(UpperCamelCase_ , UpperCamelCase_ )
else:
warnings.warn(
'''Parameter \'extractor_format\' was made required in version 2.4.0 and not passing it will raise an '''
'''exception in 3.0.0.''' , category=UpperCamelCase_ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(UpperCamelCase_ ):
return extractor.extract(UpperCamelCase_ , UpperCamelCase_ )
| 429 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class __magic_name__ :
UpperCamelCase__ = 42
UpperCamelCase__ = None
# Automatically constructed
UpperCamelCase__ = "dict"
UpperCamelCase__ = None
UpperCamelCase__ = field(default='Translation' , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ):
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def _A( self ):
from .features import Value
return {k: Value('''string''' ) for k in sorted(self.languages )}
@dataclass
class __magic_name__ :
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
# Automatically constructed
UpperCamelCase__ = "dict"
UpperCamelCase__ = None
UpperCamelCase__ = field(default='TranslationVariableLanguages' , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def _A( self ):
lowercase =sorted(set(self.languages ) ) if self.languages else None
lowercase =len(self.languages ) if self.languages else None
def __call__( self ):
return pa.struct({'''language''': pa.list_(pa.string() ), '''translation''': pa.list_(pa.string() )} )
def _A( self , snake_case_ ):
lowercase =set(self.languages )
if self.languages and set(snake_case_ ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(snake_case_ ) - lang_set ) )}) are not in valid set ({", ".join(snake_case_ )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
lowercase =[]
for lang, text in translation_dict.items():
if isinstance(snake_case_ , snake_case_ ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
lowercase , lowercase =zip(*sorted(snake_case_ ) )
return {"language": languages, "translation": translations}
def _A( self ):
from .features import Sequence, Value
return {
"language": Sequence(Value('''string''' ) ),
"translation": Sequence(Value('''string''' ) ),
}
| 72 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCamelCase : Optional[Any] = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinva import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinvaForImageClassification,
SwinvaForMaskedImageModeling,
SwinvaModel,
SwinvaPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 328 | 0 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def UpperCAmelCase__ ( A__ = 8 ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ = ascii_letters + digits + punctuation
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def UpperCAmelCase__ ( A__ , A__ ) -> str:
"""simple docstring"""
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(_lowerCamelCase )
lowerCamelCase__ = i // 3
lowerCamelCase__ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
lowerCamelCase__ = (
chars_incl
+ random(_lowerCamelCase , quotient + remainder )
+ random(_lowerCamelCase , _lowerCamelCase )
+ random(_lowerCamelCase , _lowerCamelCase )
)
lowerCamelCase__ = list(_lowerCamelCase )
shuffle(_lowerCamelCase )
return "".join(_lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def UpperCAmelCase__ ( A__ , A__ ) -> List[Any]:
"""simple docstring"""
return "".join(secrets.choice(_lowerCamelCase ) for _ in range(_lowerCamelCase ) )
def UpperCAmelCase__ ( A__ , A__ ) -> List[str]:
"""simple docstring"""
pass # Put your code here...
def UpperCAmelCase__ ( A__ , A__ ) -> Union[str, Any]:
"""simple docstring"""
pass # Put your code here...
def UpperCAmelCase__ ( A__ , A__ ) -> Dict:
"""simple docstring"""
pass # Put your code here...
def UpperCAmelCase__ ( A__ , A__ = 8 ) -> Union[str, Any]:
"""simple docstring"""
if len(_lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
lowerCamelCase__ = any(char in ascii_uppercase for char in password )
lowerCamelCase__ = any(char in ascii_lowercase for char in password )
lowerCamelCase__ = any(char in digits for char in password )
lowerCamelCase__ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def UpperCAmelCase__ ( ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ = int(input("Please indicate the max length of your password: " ).strip() )
lowerCamelCase__ = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(_lowerCamelCase ) )
print(
"Alternative Password generated:" , alternative_password_generator(_lowerCamelCase , _lowerCamelCase ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 700 |
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def UpperCAmelCase__ ( ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__ = torch.nn.Linear(2 , 4 )
lowerCamelCase__ = torch.optim.AdamW(model.parameters() , lr=1.0 )
lowerCamelCase__ = torch.optim.lr_scheduler.OneCycleLR(A__ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
lowerCamelCase__ = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
lowerCamelCase__ = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def UpperCAmelCase__ ( A__ ) -> Any:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def UpperCAmelCase__ ( A__ ) -> Any:
"""simple docstring"""
lowerCamelCase__ = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(A__ )
class _A ( __a ):
@require_cuda
def _lowerCamelCase ( self ) -> Optional[Any]:
lowerCamelCase__ = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = Accelerator(cpu=SCREAMING_SNAKE_CASE__ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ = GradientState()
assert state.num_steps == 1
lowerCamelCase__ = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowerCamelCase__ = False
assert state.sync_gradients is False
GradientState._reset_state()
def _lowerCamelCase ( self ) -> str:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) = accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def _lowerCamelCase ( self ) -> Dict:
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
pass
with patch("torch.cuda.set_device" , SCREAMING_SNAKE_CASE__ ), patch_environment(ACCELERATE_TORCH_DEVICE="cuda:64" ):
lowerCamelCase__ = Accelerator()
self.assertEqual(str(accelerator.state.device ) , "cuda:64" )
def _lowerCamelCase ( self ) -> Optional[Any]:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = get_signature(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
# make sure random weights don't match
load_random_weights(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) < 1e-3 )
def _lowerCamelCase ( self ) -> Any:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
accelerator.prepare(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = get_signature(SCREAMING_SNAKE_CASE__ )
# saving hook
def save_config(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = {"class_name": models[0].__class__.__name__}
with open(os.path.join(SCREAMING_SNAKE_CASE__ , "data.json" ) , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# loading hook
def load_config(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
with open(os.path.join(SCREAMING_SNAKE_CASE__ , "data.json" ) , "r" ) as f:
lowerCamelCase__ = json.load(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = config["class_name"]
lowerCamelCase__ = accelerator.register_save_state_pre_hook(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = accelerator.register_load_state_pre_hook(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
# make sure random weights don't match with hooks
load_random_weights(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
lowerCamelCase__ = "random"
# make sure loaded weights match with hooks
accelerator.load_state(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(SCREAMING_SNAKE_CASE__ )
# make sure random weights don't match with hooks removed
load_random_weights(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) > 1e-3 )
# random class name to verify correct one is loaded
lowerCamelCase__ = "random"
# make sure loaded weights match with hooks removed
accelerator.load_state(SCREAMING_SNAKE_CASE__ )
self.assertTrue(abs(model_signature - get_signature(SCREAMING_SNAKE_CASE__ ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def _lowerCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
lowerCamelCase__ = None
# This should work
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertTrue(dummy_obj is None )
def _lowerCamelCase ( self ) -> List[str]:
lowerCamelCase__ = Accelerator()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = create_components()
lowerCamelCase__ = [1, 2, 3]
# This should work
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Dummy object should have `_is_accelerate_prepared` set to `True`" , )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Model is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Optimizer is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Scheduler is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
self.assertEqual(
getattr(SCREAMING_SNAKE_CASE__ , "_is_accelerate_prepared" , SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ , "Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`" , )
@slow
@require_bnb
def _lowerCamelCase ( self ) -> str:
from transformers import AutoModelForCausalLM
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map={"": 0} , )
lowerCamelCase__ = Accelerator()
# This should work
lowerCamelCase__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
@slow
@require_bnb
def _lowerCamelCase ( self ) -> Tuple:
from transformers import AutoModelForCausalLM
lowerCamelCase__ = Accelerator()
with init_empty_weights():
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
lowerCamelCase__ = infer_auto_device_map(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = "cpu"
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , device_map=SCREAMING_SNAKE_CASE__ , load_in_abit=SCREAMING_SNAKE_CASE__ , llm_inta_enable_fpaa_cpu_offload=SCREAMING_SNAKE_CASE__ )
# This should not work and get value error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
@slow
@require_bnb
@require_multi_gpu
def _lowerCamelCase ( self ) -> Dict:
from transformers import AutoModelForCausalLM
lowerCamelCase__ = {"distributed_type": DistributedType.MULTI_GPU}
with init_empty_weights():
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
model.tie_weights()
lowerCamelCase__ = infer_auto_device_map(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 1
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = Accelerator()
# This should not work and get value error
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
lowerCamelCase__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def _lowerCamelCase ( self ) -> List[Any]:
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , )
lowerCamelCase__ = infer_auto_device_map(SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = 1
lowerCamelCase__ = AutoModelForCausalLM.from_pretrained(
"EleutherAI/gpt-neo-125m" , load_in_abit=SCREAMING_SNAKE_CASE__ , device_map=SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = Accelerator()
# This should work
lowerCamelCase__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
@require_cuda
def _lowerCamelCase ( self ) -> List[str]:
lowerCamelCase__ = torch.nn.Linear(10 , 10 )
lowerCamelCase__ = torch.optim.SGD(model.parameters() , lr=0.01 )
lowerCamelCase__ = Accelerator(cpu=SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = accelerator.prepare(SCREAMING_SNAKE_CASE__ )
| 274 | 0 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=5 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=4 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_choices
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_attention_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = True
_UpperCAmelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class __UpperCamelCase ( A__ , unittest.TestCase ):
__A : Union[str, Any] = True
__A : str = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase( self ):
_UpperCAmelCase = FlaxRobertaModelTester(self )
@slow
def UpperCamelCase( self ):
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained('''roberta-base''' , from_pt=_UpperCamelCase )
_UpperCAmelCase = model(np.ones((1, 1) ) )
self.assertIsNotNone(_UpperCamelCase ) | 32 |
def lowerCAmelCase__ ( UpperCamelCase_ : int )-> int:
A__ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowerCAmelCase__ ( UpperCamelCase_ : int )-> int:
A__ = 0
while number > 0:
A__ = number % 1_0
sum_of_digits += last_digit
A__ = number // 1_0 # Removing the last_digit from the given number
return sum_of_digits
def lowerCAmelCase__ ( UpperCamelCase_ : int = 1_0_0 )-> int:
A__ = factorial(UpperCamelCase_ )
A__ = split_and_add(UpperCamelCase_ )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 632 | 0 |
"""simple docstring"""
from collections import Counter
from timeit import timeit
def a__ ( a : Optional[Any] = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def a__ ( a : Optional[Any] = "" ):
"""simple docstring"""
if len(__lowerCAmelCase ) == 0:
return True
_snake_case : Tuple = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_snake_case : dict[str, int] = {}
for character in lower_case_input_str:
_snake_case : int = character_freq_dict.get(__lowerCAmelCase , 0 ) + 1
_snake_case : Dict = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def a__ ( a : List[str] = "" ):
"""simple docstring"""
print("\nFor string = " , __lowerCAmelCase , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(__lowerCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(__lowerCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
_a : Any = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
_a : List[str] = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'{check_str} can {"" if status else "not "}be rearranged as a palindrome')
| 718 |
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self , snake_case_ , snake_case_ ):
_snake_case , _snake_case : Dict = text, pattern
_snake_case , _snake_case : int = len(snake_case_ ), len(snake_case_ )
def lowerCamelCase__ ( self , snake_case_ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def lowerCamelCase__ ( self , snake_case_ ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def lowerCamelCase__ ( self ):
# searches pattern in text and returns index positions
_snake_case : List[str] = []
for i in range(self.textLen - self.patLen + 1 ):
_snake_case : Union[str, Any] = self.mismatch_in_text(snake_case_ )
if mismatch_index == -1:
positions.append(snake_case_ )
else:
_snake_case : Tuple = self.match_in_pattern(self.text[mismatch_index] )
_snake_case : Tuple = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
_a : List[Any] = """ABAABA"""
_a : str = """AB"""
_a : List[Any] = BoyerMooreSearch(text, pattern)
_a : Any = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
| 87 | 0 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
a_ = None
a_ = """<""" if sys.byteorder == """little""" else """>"""
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
a_ = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class __lowerCAmelCase :
lowerCAmelCase__ = True
lowerCAmelCase__ = None
# Automatically constructed
lowerCAmelCase__ = "PIL.Image.Image"
lowerCAmelCase__ = pa.struct({"""bytes""": pa.binary(), """path""": pa.string()} )
lowerCAmelCase__ = field(default="""Image""" , init=lowerCAmelCase__ , repr=lowerCAmelCase__ )
def __call__( self ):
'''simple docstring'''
return self.pa_type
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
__lowerCamelCase = np.array(__UpperCAmelCase )
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(__UpperCAmelCase , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(__UpperCAmelCase )
elif isinstance(__UpperCAmelCase , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(__UpperCAmelCase )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
__lowerCamelCase = {}
__lowerCamelCase ,__lowerCamelCase = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(__UpperCAmelCase ):
__lowerCamelCase = PIL.Image.open(__UpperCAmelCase )
else:
__lowerCamelCase = path.split('''::''' )[-1]
try:
__lowerCamelCase = string_to_dict(__UpperCAmelCase , config.HUB_DATASETS_URL )['''repo_id''']
__lowerCamelCase = token_per_repo_id.get(__UpperCAmelCase )
except ValueError:
__lowerCamelCase = None
with xopen(__UpperCAmelCase , '''rb''' , use_auth_token=__UpperCAmelCase ) as f:
__lowerCamelCase = BytesIO(f.read() )
__lowerCamelCase = PIL.Image.open(bytes_ )
else:
__lowerCamelCase = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowerCamelCase ( self ):
'''simple docstring'''
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if pa.types.is_string(storage.type ):
__lowerCamelCase = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
__lowerCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__lowerCamelCase = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
__lowerCamelCase = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
__lowerCamelCase = storage.field('''bytes''' )
else:
__lowerCamelCase = pa.array([None] * len(__UpperCAmelCase ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
__lowerCamelCase = storage.field('''path''' )
else:
__lowerCamelCase = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
__lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
__lowerCamelCase = pa.array(
[encode_np_array(np.array(__UpperCAmelCase ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
__lowerCamelCase = pa.array([None] * len(__UpperCAmelCase ) , type=pa.string() )
__lowerCamelCase = pa.StructArray.from_arrays(
[bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
@no_op_if_value_is_null
def path_to_bytes(__UpperCAmelCase ):
with xopen(__UpperCAmelCase , '''rb''' ) as f:
__lowerCamelCase = f.read()
return bytes_
__lowerCamelCase = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__lowerCamelCase = pa.array(
[os.path.basename(__UpperCAmelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
__lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(__UpperCAmelCase , self.pa_type )
def a__ ( ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
__lowerCamelCase = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def a__ ( _UpperCamelCase : "PIL.Image.Image" ):
__lowerCamelCase = BytesIO()
if image.format in list_image_compression_formats():
__lowerCamelCase = image.format
else:
__lowerCamelCase = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(_UpperCamelCase ,format=_UpperCamelCase )
return buffer.getvalue()
def a__ ( _UpperCamelCase : "PIL.Image.Image" ):
if hasattr(_UpperCamelCase ,'''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def a__ ( _UpperCamelCase : np.ndarray ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
__lowerCamelCase = array.dtype
__lowerCamelCase = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
__lowerCamelCase = dtype.kind
__lowerCamelCase = dtype.itemsize
__lowerCamelCase = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
__lowerCamelCase = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
__lowerCamelCase = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
__lowerCamelCase = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
__lowerCamelCase = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
__lowerCamelCase = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def a__ ( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
__lowerCamelCase ,__lowerCamelCase = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase ,_UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase ,np.ndarray ):
__lowerCamelCase = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase ,PIL.Image.Image ):
__lowerCamelCase = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 175 |
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def a__ ( _UpperCamelCase : List[Any] ):
if "cls_token" in name:
__lowerCamelCase = name.replace('''cls_token''' ,'''vit.embeddings.cls_token''' )
if "mask_token" in name:
__lowerCamelCase = name.replace('''mask_token''' ,'''decoder.mask_token''' )
if "decoder_pos_embed" in name:
__lowerCamelCase = name.replace('''decoder_pos_embed''' ,'''decoder.decoder_pos_embed''' )
if "pos_embed" in name and "decoder" not in name:
__lowerCamelCase = name.replace('''pos_embed''' ,'''vit.embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
__lowerCamelCase = name.replace('''patch_embed.proj''' ,'''vit.embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
__lowerCamelCase = name.replace('''patch_embed.norm''' ,'''vit.embeddings.norm''' )
if "decoder_blocks" in name:
__lowerCamelCase = name.replace('''decoder_blocks''' ,'''decoder.decoder_layers''' )
if "blocks" in name:
__lowerCamelCase = name.replace('''blocks''' ,'''vit.encoder.layer''' )
if "attn.proj" in name:
__lowerCamelCase = name.replace('''attn.proj''' ,'''attention.output.dense''' )
if "attn" in name:
__lowerCamelCase = name.replace('''attn''' ,'''attention.self''' )
if "norm1" in name:
__lowerCamelCase = name.replace('''norm1''' ,'''layernorm_before''' )
if "norm2" in name:
__lowerCamelCase = name.replace('''norm2''' ,'''layernorm_after''' )
if "mlp.fc1" in name:
__lowerCamelCase = name.replace('''mlp.fc1''' ,'''intermediate.dense''' )
if "mlp.fc2" in name:
__lowerCamelCase = name.replace('''mlp.fc2''' ,'''output.dense''' )
if "decoder_embed" in name:
__lowerCamelCase = name.replace('''decoder_embed''' ,'''decoder.decoder_embed''' )
if "decoder_norm" in name:
__lowerCamelCase = name.replace('''decoder_norm''' ,'''decoder.decoder_norm''' )
if "decoder_pred" in name:
__lowerCamelCase = name.replace('''decoder_pred''' ,'''decoder.decoder_pred''' )
if "norm.weight" in name and "decoder" not in name:
__lowerCamelCase = name.replace('''norm.weight''' ,'''vit.layernorm.weight''' )
if "norm.bias" in name and "decoder" not in name:
__lowerCamelCase = name.replace('''norm.bias''' ,'''vit.layernorm.bias''' )
return name
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Dict ):
for key in orig_state_dict.copy().keys():
__lowerCamelCase = orig_state_dict.pop(_UpperCamelCase )
if "qkv" in key:
__lowerCamelCase = key.split('''.''' )
__lowerCamelCase = int(key_split[1] )
if "decoder_blocks" in key:
__lowerCamelCase = config.decoder_hidden_size
__lowerCamelCase = '''decoder.decoder_layers.'''
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
elif "bias" in key:
__lowerCamelCase = val[:dim]
__lowerCamelCase = val[dim : dim * 2]
__lowerCamelCase = val[-dim:]
else:
__lowerCamelCase = config.hidden_size
__lowerCamelCase = '''vit.encoder.layer.'''
if "weight" in key:
__lowerCamelCase = val[:dim, :]
__lowerCamelCase = val[dim : dim * 2, :]
__lowerCamelCase = val[-dim:, :]
elif "bias" in key:
__lowerCamelCase = val[:dim]
__lowerCamelCase = val[dim : dim * 2]
__lowerCamelCase = val[-dim:]
else:
__lowerCamelCase = val
return orig_state_dict
def a__ ( _UpperCamelCase : Dict ,_UpperCamelCase : Any ):
__lowerCamelCase = ViTMAEConfig()
if "large" in checkpoint_url:
__lowerCamelCase = 10_24
__lowerCamelCase = 40_96
__lowerCamelCase = 24
__lowerCamelCase = 16
elif "huge" in checkpoint_url:
__lowerCamelCase = 14
__lowerCamelCase = 12_80
__lowerCamelCase = 51_20
__lowerCamelCase = 32
__lowerCamelCase = 16
__lowerCamelCase = ViTMAEForPreTraining(_UpperCamelCase )
__lowerCamelCase = torch.hub.load_state_dict_from_url(_UpperCamelCase ,map_location='''cpu''' )['''model''']
__lowerCamelCase = ViTMAEImageProcessor(size=config.image_size )
__lowerCamelCase = convert_state_dict(_UpperCamelCase ,_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
model.eval()
__lowerCamelCase = '''https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'''
__lowerCamelCase = Image.open(requests.get(_UpperCamelCase ,stream=_UpperCamelCase ).raw )
__lowerCamelCase = ViTMAEImageProcessor(size=config.image_size )
__lowerCamelCase = image_processor(images=_UpperCamelCase ,return_tensors='''pt''' )
# forward pass
torch.manual_seed(2 )
__lowerCamelCase = model(**_UpperCamelCase )
__lowerCamelCase = outputs.logits
if "large" in checkpoint_url:
__lowerCamelCase = torch.tensor(
[[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] )
elif "huge" in checkpoint_url:
__lowerCamelCase = torch.tensor(
[[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] )
else:
__lowerCamelCase = torch.tensor(
[[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] )
# verify logits
assert torch.allclose(logits[0, :3, :3] ,_UpperCamelCase ,atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth""",
type=str,
help="""URL of the checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a_ = parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 175 | 1 |
'''simple docstring'''
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class snake_case :
def __init__( self ,UpperCAmelCase_ ) -> List[str]:
lowercase__ = data
lowercase__ = [0X6_7_4_5_2_3_0_1, 0Xe_f_c_d_a_b_8_9, 0X9_8_b_a_d_c_f_e, 0X1_0_3_2_5_4_7_6, 0Xc_3_d_2_e_1_f_0]
@staticmethod
def _a ( UpperCAmelCase_ ,UpperCAmelCase_ ) -> List[Any]:
return ((n << b) | (n >> (32 - b))) & 0Xf_f_f_f_f_f_f_f
def _a ( self ) -> Union[str, Any]:
lowercase__ = b"\x80" + b"\x00" * (63 - (len(self.data ) + 8) % 64)
lowercase__ = self.data + padding + struct.pack(">Q" ,8 * len(self.data ) )
return padded_data
def _a ( self ) -> List[Any]:
return [
self.padded_data[i : i + 64] for i in range(0 ,len(self.padded_data ) ,64 )
]
def _a ( self ,UpperCAmelCase_ ) -> int:
lowercase__ = list(struct.unpack(">16L" ,UpperCAmelCase_ ) ) + [0] * 64
for i in range(16 ,80 ):
lowercase__ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) ,1 )
return w
def _a ( self ) -> int:
lowercase__ = self.padding()
lowercase__ = self.split_blocks()
for block in self.blocks:
lowercase__ = self.expand_block(UpperCAmelCase_ )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.h
for i in range(0 ,80 ):
if 0 <= i < 20:
lowercase__ = (b & c) | ((~b) & d)
lowercase__ = 0X5_a_8_2_7_9_9_9
elif 20 <= i < 40:
lowercase__ = b ^ c ^ d
lowercase__ = 0X6_e_d_9_e_b_a_1
elif 40 <= i < 60:
lowercase__ = (b & c) | (b & d) | (c & d)
lowercase__ = 0X8_f_1_b_b_c_d_c
elif 60 <= i < 80:
lowercase__ = b ^ c ^ d
lowercase__ = 0Xc_a_6_2_c_1_d_6
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = (
self.rotate(UpperCAmelCase_ ,5 ) + f + e + k + expanded_block[i] & 0Xf_f_f_f_f_f_f_f,
a,
self.rotate(UpperCAmelCase_ ,30 ),
c,
d,
)
lowercase__ = (
self.h[0] + a & 0Xf_f_f_f_f_f_f_f,
self.h[1] + b & 0Xf_f_f_f_f_f_f_f,
self.h[2] + c & 0Xf_f_f_f_f_f_f_f,
self.h[3] + d & 0Xf_f_f_f_f_f_f_f,
self.h[4] + e & 0Xf_f_f_f_f_f_f_f,
)
return ("{:08x}" * 5).format(*self.h )
def lowerCamelCase ( ):
'''simple docstring'''
lowercase__ = B"Test String"
assert SHAaHash(_snake_case ).final_hash() == hashlib.shaa(_snake_case ).hexdigest() # noqa: S324
def lowerCamelCase ( ):
'''simple docstring'''
lowercase__ = argparse.ArgumentParser(description="Process some strings or files" )
parser.add_argument(
"--string" ,dest="input_string" ,default="Hello World!! Welcome to Cryptography" ,help="Hash the string" ,)
parser.add_argument("--file" ,dest="input_file" ,help="Hash contents of a file" )
lowercase__ = parser.parse_args()
lowercase__ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file ,"rb" ) as f:
lowercase__ = f.read()
else:
lowercase__ = bytes(_snake_case ,"utf-8" )
print(SHAaHash(_snake_case ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 539 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class snake_case (UpperCamelCase , unittest.TestCase ):
lowerCAmelCase__ :int = TextToVideoSDPipeline
lowerCAmelCase__ :Union[str, Any] = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase__ :List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase__ :Optional[Any] = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def _a ( self ) -> Optional[int]:
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") ,up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
lowercase__ = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule="scaled_linear" ,clip_sample=UpperCAmelCase_ ,set_alpha_to_one=UpperCAmelCase_ ,)
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act="gelu" ,projection_dim=512 ,)
lowercase__ = CLIPTextModel(UpperCAmelCase_ )
lowercase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowercase__ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _a ( self ,UpperCAmelCase_ ,UpperCAmelCase_=0 ) -> int:
if str(UpperCAmelCase_ ).startswith("mps" ):
lowercase__ = torch.manual_seed(UpperCAmelCase_ )
else:
lowercase__ = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowercase__ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def _a ( self ) -> Tuple:
lowercase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ = self.get_dummy_components()
lowercase__ = TextToVideoSDPipeline(**UpperCAmelCase_ )
lowercase__ = sd_pipe.to(UpperCAmelCase_ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowercase__ = self.get_dummy_inputs(UpperCAmelCase_ )
lowercase__ = "np"
lowercase__ = sd_pipe(**UpperCAmelCase_ ).frames
lowercase__ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
lowercase__ = np.array([1_58.0, 1_60.0, 1_53.0, 1_25.0, 1_00.0, 1_21.0, 1_11.0, 93.0, 1_13.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> List[Any]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCAmelCase_ ,expected_max_diff=3E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() ,reason="XFormers attention is only available with CUDA and `xformers` installed" ,)
def _a ( self ) -> Dict:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCAmelCase_ ,expected_max_diff=1E-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _a ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def _a ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def _a ( self ) -> Union[str, Any]:
pass
def _a ( self ) -> str:
return super().test_progress_bar()
@slow
@skip_mps
class snake_case (unittest.TestCase ):
def _a ( self ) -> Optional[int]:
lowercase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
lowercase__ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
lowercase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowercase__ = pipe.to("cuda" )
lowercase__ = "Spiderman is surfing"
lowercase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ = pipe(UpperCAmelCase_ ,generator=UpperCAmelCase_ ,num_inference_steps=25 ,output_type="pt" ).frames
lowercase__ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
def _a ( self ) -> Optional[int]:
lowercase__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
lowercase__ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
lowercase__ = pipe.to("cuda" )
lowercase__ = "Spiderman is surfing"
lowercase__ = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ = pipe(UpperCAmelCase_ ,generator=UpperCAmelCase_ ,num_inference_steps=2 ,output_type="pt" ).frames
lowercase__ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5E-2
| 539 | 1 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def _UpperCAmelCase ( UpperCamelCase: Optional[int] , UpperCamelCase: Dict , UpperCamelCase: int ):
"""simple docstring"""
__lowerCAmelCase = os.path.abspath(UpperCamelCase )
logger.info(F"Converting TensorFlow checkpoint from {tf_path}" )
# Load weights from TF model
__lowerCAmelCase = tf.train.list_variables(UpperCamelCase )
__lowerCAmelCase = []
__lowerCAmelCase = []
__lowerCAmelCase = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
__lowerCAmelCase = full_name.split("/" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F"Skipping non-model layer {full_name}" )
continue
if "optimizer" in full_name:
logger.info(F"Skipping optimization layer {full_name}" )
continue
if name[0] == "model":
# ignore initial 'model'
__lowerCAmelCase = name[1:]
# figure out how many levels deep the name is
__lowerCAmelCase = 0
for _name in name:
if _name.startswith("layer_with_weights" ):
depth += 1
else:
break
layer_depth.append(UpperCamelCase )
# read data
__lowerCAmelCase = tf.train.load_variable(UpperCamelCase , UpperCamelCase )
names.append("/".join(UpperCamelCase ) )
arrays.append(UpperCamelCase )
logger.info(F"Read a total of {len(UpperCamelCase ):,} layers" )
# Sanity check
if len(set(UpperCamelCase ) ) != 1:
raise ValueError(F"Found layer names with different depths (layer depth {list(set(UpperCamelCase ) )})" )
__lowerCAmelCase = list(set(UpperCamelCase ) )[0]
if layer_depth != 1:
raise ValueError(
"The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
" heads." )
# convert layers
logger.info("Converting weights..." )
for full_name, array in zip(UpperCamelCase , UpperCamelCase ):
__lowerCAmelCase = full_name.split("/" )
__lowerCAmelCase = model
__lowerCAmelCase = []
for i, m_name in enumerate(UpperCamelCase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("layer_with_weights" ):
__lowerCAmelCase = int(m_name.split("-" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["embeddings", "LayerNorm"] )
__lowerCAmelCase = getattr(UpperCamelCase , "embeddings" )
__lowerCAmelCase = getattr(UpperCamelCase , "LayerNorm" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["encoder", "layer", str(layer_num - 4 )] )
__lowerCAmelCase = getattr(UpperCamelCase , "encoder" )
__lowerCAmelCase = getattr(UpperCamelCase , "layer" )
__lowerCAmelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["pooler", "dense"] )
__lowerCAmelCase = getattr(UpperCamelCase , "pooler" )
__lowerCAmelCase = getattr(UpperCamelCase , "dense" )
elif m_name == "embeddings":
trace.append("embeddings" )
__lowerCAmelCase = getattr(UpperCamelCase , "embeddings" )
if layer_num == 0:
trace.append("word_embeddings" )
__lowerCAmelCase = getattr(UpperCamelCase , "word_embeddings" )
elif layer_num == 1:
trace.append("position_embeddings" )
__lowerCAmelCase = getattr(UpperCamelCase , "position_embeddings" )
elif layer_num == 2:
trace.append("token_type_embeddings" )
__lowerCAmelCase = getattr(UpperCamelCase , "token_type_embeddings" )
else:
raise ValueError(F"Unknown embedding layer with name {full_name}" )
trace.append("weight" )
__lowerCAmelCase = getattr(UpperCamelCase , "weight" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["attention", "self"] )
__lowerCAmelCase = getattr(UpperCamelCase , "attention" )
__lowerCAmelCase = getattr(UpperCamelCase , "self" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["attention", "output", "LayerNorm"] )
__lowerCAmelCase = getattr(UpperCamelCase , "attention" )
__lowerCAmelCase = getattr(UpperCamelCase , "output" )
__lowerCAmelCase = getattr(UpperCamelCase , "LayerNorm" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["attention", "output", "dense"] )
__lowerCAmelCase = getattr(UpperCamelCase , "attention" )
__lowerCAmelCase = getattr(UpperCamelCase , "output" )
__lowerCAmelCase = getattr(UpperCamelCase , "dense" )
elif m_name == "_output_dense":
# output dense
trace.extend(["output", "dense"] )
__lowerCAmelCase = getattr(UpperCamelCase , "output" )
__lowerCAmelCase = getattr(UpperCamelCase , "dense" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["output", "LayerNorm"] )
__lowerCAmelCase = getattr(UpperCamelCase , "output" )
__lowerCAmelCase = getattr(UpperCamelCase , "LayerNorm" )
elif m_name == "_key_dense":
# attention key
trace.append("key" )
__lowerCAmelCase = getattr(UpperCamelCase , "key" )
elif m_name == "_query_dense":
# attention query
trace.append("query" )
__lowerCAmelCase = getattr(UpperCamelCase , "query" )
elif m_name == "_value_dense":
# attention value
trace.append("value" )
__lowerCAmelCase = getattr(UpperCamelCase , "value" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["intermediate", "dense"] )
__lowerCAmelCase = getattr(UpperCamelCase , "intermediate" )
__lowerCAmelCase = getattr(UpperCamelCase , "dense" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("output" )
__lowerCAmelCase = getattr(UpperCamelCase , "output" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("bias" )
__lowerCAmelCase = getattr(UpperCamelCase , "bias" )
elif m_name in ["kernel", "gamma"]:
trace.append("weight" )
__lowerCAmelCase = getattr(UpperCamelCase , "weight" )
else:
logger.warning(F"Ignored {m_name}" )
# for certain layers reshape is necessary
__lowerCAmelCase = ".".join(UpperCamelCase )
if re.match(R"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)" , UpperCamelCase ) or re.match(
R"(\S+)\.attention\.output\.dense\.weight" , UpperCamelCase ):
__lowerCAmelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
__lowerCAmelCase = array.transpose()
if pointer.shape == array.shape:
__lowerCAmelCase = torch.from_numpy(UpperCamelCase )
else:
raise ValueError(
F"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
F" {array.shape}" )
logger.info(F"Successfully set variable {full_name} to PyTorch layer {trace}" )
return model
def _UpperCAmelCase ( UpperCamelCase: str , UpperCamelCase: Tuple , UpperCamelCase: Optional[Any] ):
"""simple docstring"""
logger.info(F"Loading model based on config from {config_path}..." )
__lowerCAmelCase = BertConfig.from_json_file(UpperCamelCase )
__lowerCAmelCase = BertModel(UpperCamelCase )
# Load weights from checkpoint
logger.info(F"Loading weights from checkpoint {tf_checkpoint_path}..." )
load_tfa_weights_in_bert(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# Save pytorch-model
logger.info(F"Saving PyTorch model to {pytorch_dump_path}..." )
torch.save(model.state_dict() , UpperCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
UpperCamelCase_ = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 611 |
from sklearn.metrics import recall_score
import datasets
UpperCamelCase_ = "\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n"
UpperCamelCase_ = "\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n - `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {'recall': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric('recall')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {'recall': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric('recall')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {'recall': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric('recall')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')\n >>> print(results)\n {'recall': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'recall': array([1., 0., 0.])}\n"
UpperCamelCase_ = "\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"] , )
def UpperCAmelCase__ ( self : Dict , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : Union[str, Any]=None , snake_case__ : int=1 , snake_case__ : List[str]="binary" , snake_case__ : Tuple=None , snake_case__ : Dict="warn" , ):
"""simple docstring"""
__lowerCAmelCase = recall_score(
snake_case__ , snake_case__ , labels=snake_case__ , pos_label=snake_case__ , average=snake_case__ , sample_weight=snake_case__ , zero_division=snake_case__ , )
return {"recall": float(snake_case__ ) if score.size == 1 else score}
| 611 | 1 |
'''simple docstring'''
from copy import deepcopy
class snake_case__ :
"""simple docstring"""
def __init__( self : int, _snake_case : List[str] = None, _snake_case : List[str] = None ) ->Optional[int]:
if arr is None and size is not None:
snake_case__ : Dict = size
snake_case__ : Optional[int] = [0] * size
elif arr is not None:
self.init(_snake_case )
else:
raise ValueError('Either arr or size must be specified' )
def lowercase_ ( self : Union[str, Any], _snake_case : int ) ->List[str]:
snake_case__ : str = len(_snake_case )
snake_case__ : Any = deepcopy(_snake_case )
for i in range(1, self.size ):
snake_case__ : Dict = self.next_(_snake_case )
if j < self.size:
self.tree[j] += self.tree[i]
def lowercase_ ( self : Any ) ->int:
snake_case__ : Tuple = self.tree[:]
for i in range(self.size - 1, 0, -1 ):
snake_case__ : Optional[Any] = self.next_(_snake_case )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowercase_ ( _snake_case : str ) ->Any:
return index + (index & (-index))
@staticmethod
def lowercase_ ( _snake_case : Any ) ->Any:
return index - (index & (-index))
def lowercase_ ( self : str, _snake_case : Union[str, Any], _snake_case : List[str] ) ->List[str]:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
snake_case__ : str = self.next_(_snake_case )
def lowercase_ ( self : Dict, _snake_case : Any, _snake_case : str ) ->str:
self.add(_snake_case, value - self.get(_snake_case ) )
def lowercase_ ( self : List[str], _snake_case : int ) ->Union[str, Any]:
if right == 0:
return 0
snake_case__ : List[str] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
snake_case__ : Any = self.prev(_snake_case )
return result
def lowercase_ ( self : int, _snake_case : Union[str, Any], _snake_case : List[Any] ) ->List[Any]:
return self.prefix(_snake_case ) - self.prefix(_snake_case )
def lowercase_ ( self : List[str], _snake_case : Any ) ->int:
return self.query(_snake_case, index + 1 )
def lowercase_ ( self : Tuple, _snake_case : Tuple ) ->Optional[int]:
value -= self.tree[0]
if value < 0:
return -1
snake_case__ : Tuple = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
snake_case__ : Dict = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
from __future__ import annotations
def lowercase_ (A : list[int] ):
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 243 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Union[str, Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
_UpperCamelCase : List[Any] = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
_UpperCamelCase : List[str] = tuple[int, int]
class _lowercase:
"""simple docstring"""
def __init__( self: List[str] ,a: Dict ,a: str ,a: List[Any] ,a: Dict ,a: List[Any] ,a: str ,):
__UpperCAmelCase = pos_x
__UpperCAmelCase = pos_y
__UpperCAmelCase = (pos_y, pos_x)
__UpperCAmelCase = goal_x
__UpperCAmelCase = goal_y
__UpperCAmelCase = g_cost
__UpperCAmelCase = parent
__UpperCAmelCase = self.calculate_heuristic()
__UpperCAmelCase = self.g_cost + self.h_cost
def snake_case ( self: Optional[Any] ):
__UpperCAmelCase = self.pos_x - self.goal_x
__UpperCAmelCase = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowerCAmelCase__ ) + abs(lowerCAmelCase__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self: Tuple ,a: Dict ):
return self.f_cost < other.f_cost
class _lowercase:
"""simple docstring"""
def __init__( self: int ,a: Union[str, Any] ,a: Any ):
__UpperCAmelCase = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,0 ,lowerCAmelCase__ )
__UpperCAmelCase = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,99999 ,lowerCAmelCase__ )
__UpperCAmelCase = [self.start]
__UpperCAmelCase = []
__UpperCAmelCase = False
def snake_case ( self: Optional[int] ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__UpperCAmelCase = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowerCAmelCase__ )
self.closed_nodes.append(lowerCAmelCase__ )
__UpperCAmelCase = self.get_successors(lowerCAmelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowerCAmelCase__ )
else:
# retrieve the best current path
__UpperCAmelCase = self.open_nodes.pop(self.open_nodes.index(lowerCAmelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowerCAmelCase__ )
else:
self.open_nodes.append(lowerCAmelCase__ )
return [self.start.pos]
def snake_case ( self: Optional[int] ,a: List[Any] ):
__UpperCAmelCase = []
for action in delta:
__UpperCAmelCase = parent.pos_x + action[1]
__UpperCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowerCAmelCase__ ,lowerCAmelCase__ ,self.target.pos_y ,self.target.pos_x ,parent.g_cost + 1 ,lowerCAmelCase__ ,) )
return successors
def snake_case ( self: Any ,a: int ):
__UpperCAmelCase = node
__UpperCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__UpperCAmelCase = current_node.parent
path.reverse()
return path
class _lowercase:
"""simple docstring"""
def __init__( self: Tuple ,a: Tuple ,a: Dict ):
__UpperCAmelCase = AStar(lowerCAmelCase__ ,lowerCAmelCase__ )
__UpperCAmelCase = AStar(lowerCAmelCase__ ,lowerCAmelCase__ )
__UpperCAmelCase = False
def snake_case ( self: Optional[Any] ):
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
__UpperCAmelCase = self.fwd_astar.open_nodes.pop(0 )
__UpperCAmelCase = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowerCAmelCase__ ,lowerCAmelCase__ )
self.fwd_astar.closed_nodes.append(lowerCAmelCase__ )
self.bwd_astar.closed_nodes.append(lowerCAmelCase__ )
__UpperCAmelCase = current_bwd_node
__UpperCAmelCase = current_fwd_node
__UpperCAmelCase = {
self.fwd_astar: self.fwd_astar.get_successors(lowerCAmelCase__ ),
self.bwd_astar: self.bwd_astar.get_successors(lowerCAmelCase__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowerCAmelCase__ )
else:
# retrieve the best current path
__UpperCAmelCase = astar.open_nodes.pop(
astar.open_nodes.index(lowerCAmelCase__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowerCAmelCase__ )
else:
astar.open_nodes.append(lowerCAmelCase__ )
return [self.fwd_astar.start.pos]
def snake_case ( self: Optional[Any] ,a: Optional[int] ,a: Tuple ):
__UpperCAmelCase = self.fwd_astar.retrace_path(lowerCAmelCase__ )
__UpperCAmelCase = self.bwd_astar.retrace_path(lowerCAmelCase__ )
bwd_path.pop()
bwd_path.reverse()
__UpperCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
_UpperCamelCase : Any = (0, 0)
_UpperCamelCase : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
_UpperCamelCase : Optional[Any] = time.time()
_UpperCamelCase : Optional[Any] = AStar(init, goal)
_UpperCamelCase : str = a_star.search()
_UpperCamelCase : List[Any] = time.time() - start_time
print(f"AStar execution time = {end_time:f} seconds")
_UpperCamelCase : int = time.time()
_UpperCamelCase : List[Any] = BidirectionalAStar(init, goal)
_UpperCamelCase : Tuple = time.time() - bd_start_time
print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 396 | import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("""Googling.....""")
__a : Tuple = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:])
__a : List[str] = requests.get(url, headers={"""UserAgent""": UserAgent().random})
# res.raise_for_status()
with open("""project1a.html""", """wb""") as out_file: # only for knowing the class
for data in res.iter_content(1_0_0_0_0):
out_file.write(data)
__a : Dict = BeautifulSoup(res.text, """html.parser""")
__a : Any = list(soup.select(""".eZt8xd"""))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("""href"""))
else:
webbrowser.open(F'''https://google.com{link.get("href")}''') | 534 | 0 |
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
a_ = logging.get_logger(__name__)
a_ = OrderedDict(
[
# Base model mapping
("""albert""", """FlaxAlbertModel"""),
("""bart""", """FlaxBartModel"""),
("""beit""", """FlaxBeitModel"""),
("""bert""", """FlaxBertModel"""),
("""big_bird""", """FlaxBigBirdModel"""),
("""blenderbot""", """FlaxBlenderbotModel"""),
("""blenderbot-small""", """FlaxBlenderbotSmallModel"""),
("""clip""", """FlaxCLIPModel"""),
("""distilbert""", """FlaxDistilBertModel"""),
("""electra""", """FlaxElectraModel"""),
("""gpt-sw3""", """FlaxGPT2Model"""),
("""gpt2""", """FlaxGPT2Model"""),
("""gpt_neo""", """FlaxGPTNeoModel"""),
("""gptj""", """FlaxGPTJModel"""),
("""longt5""", """FlaxLongT5Model"""),
("""marian""", """FlaxMarianModel"""),
("""mbart""", """FlaxMBartModel"""),
("""mt5""", """FlaxMT5Model"""),
("""opt""", """FlaxOPTModel"""),
("""pegasus""", """FlaxPegasusModel"""),
("""regnet""", """FlaxRegNetModel"""),
("""resnet""", """FlaxResNetModel"""),
("""roberta""", """FlaxRobertaModel"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""),
("""roformer""", """FlaxRoFormerModel"""),
("""t5""", """FlaxT5Model"""),
("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""),
("""vit""", """FlaxViTModel"""),
("""wav2vec2""", """FlaxWav2Vec2Model"""),
("""whisper""", """FlaxWhisperModel"""),
("""xglm""", """FlaxXGLMModel"""),
("""xlm-roberta""", """FlaxXLMRobertaModel"""),
]
)
a_ = OrderedDict(
[
# Model for pre-training mapping
("""albert""", """FlaxAlbertForPreTraining"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForPreTraining"""),
("""big_bird""", """FlaxBigBirdForPreTraining"""),
("""electra""", """FlaxElectraForPreTraining"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
a_ = OrderedDict(
[
# Model for Masked LM mapping
("""albert""", """FlaxAlbertForMaskedLM"""),
("""bart""", """FlaxBartForConditionalGeneration"""),
("""bert""", """FlaxBertForMaskedLM"""),
("""big_bird""", """FlaxBigBirdForMaskedLM"""),
("""distilbert""", """FlaxDistilBertForMaskedLM"""),
("""electra""", """FlaxElectraForMaskedLM"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""roberta""", """FlaxRobertaForMaskedLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""),
("""roformer""", """FlaxRoFormerForMaskedLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""),
]
)
a_ = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
("""bart""", """FlaxBartForConditionalGeneration"""),
("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""),
("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""),
("""encoder-decoder""", """FlaxEncoderDecoderModel"""),
("""longt5""", """FlaxLongT5ForConditionalGeneration"""),
("""marian""", """FlaxMarianMTModel"""),
("""mbart""", """FlaxMBartForConditionalGeneration"""),
("""mt5""", """FlaxMT5ForConditionalGeneration"""),
("""pegasus""", """FlaxPegasusForConditionalGeneration"""),
("""t5""", """FlaxT5ForConditionalGeneration"""),
]
)
a_ = OrderedDict(
[
# Model for Image-classsification
("""beit""", """FlaxBeitForImageClassification"""),
("""regnet""", """FlaxRegNetForImageClassification"""),
("""resnet""", """FlaxResNetForImageClassification"""),
("""vit""", """FlaxViTForImageClassification"""),
]
)
a_ = OrderedDict(
[
("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""),
]
)
a_ = OrderedDict(
[
# Model for Causal LM mapping
("""bart""", """FlaxBartForCausalLM"""),
("""bert""", """FlaxBertForCausalLM"""),
("""big_bird""", """FlaxBigBirdForCausalLM"""),
("""electra""", """FlaxElectraForCausalLM"""),
("""gpt-sw3""", """FlaxGPT2LMHeadModel"""),
("""gpt2""", """FlaxGPT2LMHeadModel"""),
("""gpt_neo""", """FlaxGPTNeoForCausalLM"""),
("""gptj""", """FlaxGPTJForCausalLM"""),
("""opt""", """FlaxOPTForCausalLM"""),
("""roberta""", """FlaxRobertaForCausalLM"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""),
("""xglm""", """FlaxXGLMForCausalLM"""),
("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""),
]
)
a_ = OrderedDict(
[
# Model for Sequence Classification mapping
("""albert""", """FlaxAlbertForSequenceClassification"""),
("""bart""", """FlaxBartForSequenceClassification"""),
("""bert""", """FlaxBertForSequenceClassification"""),
("""big_bird""", """FlaxBigBirdForSequenceClassification"""),
("""distilbert""", """FlaxDistilBertForSequenceClassification"""),
("""electra""", """FlaxElectraForSequenceClassification"""),
("""mbart""", """FlaxMBartForSequenceClassification"""),
("""roberta""", """FlaxRobertaForSequenceClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""),
("""roformer""", """FlaxRoFormerForSequenceClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""),
]
)
a_ = OrderedDict(
[
# Model for Question Answering mapping
("""albert""", """FlaxAlbertForQuestionAnswering"""),
("""bart""", """FlaxBartForQuestionAnswering"""),
("""bert""", """FlaxBertForQuestionAnswering"""),
("""big_bird""", """FlaxBigBirdForQuestionAnswering"""),
("""distilbert""", """FlaxDistilBertForQuestionAnswering"""),
("""electra""", """FlaxElectraForQuestionAnswering"""),
("""mbart""", """FlaxMBartForQuestionAnswering"""),
("""roberta""", """FlaxRobertaForQuestionAnswering"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""),
("""roformer""", """FlaxRoFormerForQuestionAnswering"""),
("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""),
]
)
a_ = OrderedDict(
[
# Model for Token Classification mapping
("""albert""", """FlaxAlbertForTokenClassification"""),
("""bert""", """FlaxBertForTokenClassification"""),
("""big_bird""", """FlaxBigBirdForTokenClassification"""),
("""distilbert""", """FlaxDistilBertForTokenClassification"""),
("""electra""", """FlaxElectraForTokenClassification"""),
("""roberta""", """FlaxRobertaForTokenClassification"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""),
("""roformer""", """FlaxRoFormerForTokenClassification"""),
("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""),
]
)
a_ = OrderedDict(
[
# Model for Multiple Choice mapping
("""albert""", """FlaxAlbertForMultipleChoice"""),
("""bert""", """FlaxBertForMultipleChoice"""),
("""big_bird""", """FlaxBigBirdForMultipleChoice"""),
("""distilbert""", """FlaxDistilBertForMultipleChoice"""),
("""electra""", """FlaxElectraForMultipleChoice"""),
("""roberta""", """FlaxRobertaForMultipleChoice"""),
("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""),
("""roformer""", """FlaxRoFormerForMultipleChoice"""),
("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""),
]
)
a_ = OrderedDict(
[
("""bert""", """FlaxBertForNextSentencePrediction"""),
]
)
a_ = OrderedDict(
[
("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""),
("""whisper""", """FlaxWhisperForConditionalGeneration"""),
]
)
a_ = OrderedDict(
[
("""whisper""", """FlaxWhisperForAudioClassification"""),
]
)
a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
a_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
a_ = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : Dict = FLAX_MODEL_MAPPING
a_ = auto_class_update(FlaxAutoModel)
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : Tuple = FLAX_MODEL_FOR_PRETRAINING_MAPPING
a_ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""")
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : str = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
a_ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""")
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING
a_ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""")
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : Any = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a_ = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base"""
)
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a_ = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc="""sequence classification"""
)
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : str = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
a_ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""")
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
a_ = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc="""token classification"""
)
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : Optional[int] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
a_ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""")
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : List[str] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
a_ = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction"""
)
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : Tuple = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
a_ = auto_class_update(
FlaxAutoModelForImageClassification, head_doc="""image classification"""
)
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
a_ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""")
class A_(_BaseAutoModelClass ):
"""simple docstring"""
a_ : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
a_ = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling"""
)
| 349 |
"""simple docstring"""
def UpperCAmelCase_ ( __a : int = 10_00 ):
'''simple docstring'''
_lowerCamelCase , _lowerCamelCase : Dict = 1, 1
_lowerCamelCase : Optional[Any] = 2
while True:
_lowerCamelCase : str = 0
_lowerCamelCase : Optional[Any] = fa + fa
_lowerCamelCase , _lowerCamelCase : Optional[int] = fa, f
index += 1
for _ in str(__a ):
i += 1
if i == n:
break
return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 349 | 1 |