code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def UpperCamelCase ( __magic_name__ : Tuple ) -> Tuple:
"""simple docstring"""
if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(UpperCamelCase__ , """_dynamo""" ):
return False
return isinstance(UpperCamelCase__ , torch._dynamo.eval_frame.OptimizedModule )
def UpperCamelCase ( __magic_name__ : Any , __magic_name__ : List[Any] = True ) -> str:
"""simple docstring"""
lowercase__ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowercase__ = is_compiled_module(UpperCamelCase__ )
if is_compiled:
lowercase__ = model
lowercase__ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase__ = model.module
if not keep_fpaa_wrapper:
lowercase__ = getattr(UpperCamelCase__ , """forward""" )
lowercase__ = model.__dict__.pop("""_original_forward""" , UpperCamelCase__ )
if original_forward is not None:
while hasattr(UpperCamelCase__ , """__wrapped__""" ):
lowercase__ = forward.__wrapped__
if forward == original_forward:
break
lowercase__ = forward
if getattr(UpperCamelCase__ , """_converted_to_transformer_engine""" , UpperCamelCase__ ):
convert_model(UpperCamelCase__ , to_transformer_engine=UpperCamelCase__ )
if is_compiled:
lowercase__ = model
lowercase__ = compiled_model
return model
def UpperCamelCase ( ) -> int:
"""simple docstring"""
PartialState().wait_for_everyone()
def UpperCamelCase ( __magic_name__ : int , __magic_name__ : List[Any] ) -> Optional[int]:
"""simple docstring"""
if PartialState().distributed_type == DistributedType.TPU:
xm.save(UpperCamelCase__ , UpperCamelCase__ )
elif PartialState().local_process_index == 0:
torch.save(UpperCamelCase__ , UpperCamelCase__ )
@contextmanager
def UpperCamelCase ( **__magic_name__ : int ) -> Union[str, Any]:
"""simple docstring"""
for key, value in kwargs.items():
lowercase__ = str(UpperCamelCase__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def UpperCamelCase ( __magic_name__ : str ) -> Tuple:
"""simple docstring"""
if not hasattr(UpperCamelCase__ , """__qualname__""" ) and not hasattr(UpperCamelCase__ , """__name__""" ):
lowercase__ = getattr(UpperCamelCase__ , """__class__""" , UpperCamelCase__ )
if hasattr(UpperCamelCase__ , """__qualname__""" ):
return obj.__qualname__
if hasattr(UpperCamelCase__ , """__name__""" ):
return obj.__name__
return str(UpperCamelCase__ )
def UpperCamelCase ( __magic_name__ : Tuple , __magic_name__ : Any ) -> Tuple:
"""simple docstring"""
for key, value in source.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowercase__ = destination.setdefault(UpperCamelCase__ , {} )
merge_dicts(UpperCamelCase__ , UpperCamelCase__ )
else:
lowercase__ = value
return destination
def UpperCamelCase ( __magic_name__ : List[str] = None ) -> bool:
"""simple docstring"""
if port is None:
lowercase__ = 2_9500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(("""localhost""", port) ) == 0
| 15 |
"""simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 690 | 0 |
import requests
from bsa import BeautifulSoup
def lowerCamelCase ( UpperCamelCase : str = "AAPL" ) -> str:
_lowerCamelCase = F"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
_lowerCamelCase = BeautifulSoup(requests.get(UpperCamelCase ).text , 'html.parser' )
_lowerCamelCase = 'My(6px) Pos(r) smartphone_Mt(6px)'
return soup.find('div' , class_=class_ ).find('span' ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''') | 234 | import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Optional[Any]=1_3 , snake_case__ : Tuple=7 , snake_case__ : Any=True , snake_case__ : Any=True , snake_case__ : Optional[Any]=False , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=9_9 , snake_case__ : Optional[int]=6_4 , snake_case__ : Dict=5 , snake_case__ : Dict=4 , snake_case__ : Union[str, Any]=6_4 , snake_case__ : Dict="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : Any=1_6 , snake_case__ : List[str]=2 , snake_case__ : Dict=0.02 , snake_case__ : Tuple=3 , snake_case__ : Optional[int]=4 , snake_case__ : Optional[int]=None , ) -> Optional[int]:
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = seq_length
_lowerCamelCase = is_training
_lowerCamelCase = use_input_mask
_lowerCamelCase = use_token_type_ids
_lowerCamelCase = use_labels
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = num_labels
_lowerCamelCase = num_choices
_lowerCamelCase = scope
def _snake_case ( self : Tuple ) -> Union[str, Any]:
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def _snake_case ( self : List[str] ) -> str:
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCamelCase = None
if self.use_input_mask:
_lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCamelCase = None
_lowerCamelCase = None
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_lowerCamelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : Union[str, Any] ) -> int:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _snake_case ( self : str , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : Dict ) -> Dict:
_lowerCamelCase = MPNetModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(snake_case__ , snake_case__ )
_lowerCamelCase = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Union[str, Any] ) -> List[str]:
_lowerCamelCase = MPNetForQuestionAnswering(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(
snake_case__ , attention_mask=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : List[str] ) -> List[str]:
_lowerCamelCase = self.num_labels
_lowerCamelCase = MPNetForSequenceClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ) -> Any:
_lowerCamelCase = self.num_choices
_lowerCamelCase = MPNetForMultipleChoice(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCamelCase = model(
snake_case__ , attention_mask=snake_case__ , labels=snake_case__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self : str , snake_case__ : Any , snake_case__ : Dict , snake_case__ : str , snake_case__ : int , snake_case__ : Optional[Any] , snake_case__ : str ) -> Optional[int]:
_lowerCamelCase = self.num_labels
_lowerCamelCase = MPNetForTokenClassification(config=snake_case__ )
model.to(snake_case__ )
model.eval()
_lowerCamelCase = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self : int ) -> Any:
_lowerCamelCase = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = config_and_inputs
_lowerCamelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ = (
{
'feature-extraction': MPNetModel,
'fill-mask': MPNetForMaskedLM,
'question-answering': MPNetForQuestionAnswering,
'text-classification': MPNetForSequenceClassification,
'token-classification': MPNetForTokenClassification,
'zero-shot': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = True
def _snake_case ( self : Any ) -> List[Any]:
_lowerCamelCase = MPNetModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 )
def _snake_case ( self : Tuple ) -> Tuple:
self.config_tester.run_common_tests()
def _snake_case ( self : Tuple ) -> List[Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*snake_case__ )
def _snake_case ( self : List[Any] ) -> Optional[int]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*snake_case__ )
def _snake_case ( self : Optional[int] ) -> List[Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*snake_case__ )
def _snake_case ( self : Tuple ) -> Union[str, Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*snake_case__ )
def _snake_case ( self : Dict ) -> Dict:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*snake_case__ )
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self : Tuple ) -> Optional[Any]:
_lowerCamelCase = MPNetModel.from_pretrained('microsoft/mpnet-base' )
_lowerCamelCase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
_lowerCamelCase = model(snake_case__ )[0]
_lowerCamelCase = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , snake_case__ )
_lowerCamelCase = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) ) | 234 | 1 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__lowerCamelCase : List[str] = threading.Lock()
__lowerCamelCase : Optional[logging.Handler] = None
__lowerCamelCase : Dict = {
"""debug""": logging.DEBUG,
"""info""": logging.INFO,
"""warning""": logging.WARNING,
"""error""": logging.ERROR,
"""critical""": logging.CRITICAL,
}
__lowerCamelCase : str = logging.WARNING
__lowerCamelCase : Dict = True
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : List[Any] = os.getenv("TRANSFORMERS_VERBOSITY" , snake_case_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
return _default_log_level
def SCREAMING_SNAKE_CASE ( ):
return __name__.split("." )[0]
def SCREAMING_SNAKE_CASE ( ):
return logging.getLogger(_get_library_name() )
def SCREAMING_SNAKE_CASE ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
snake_case__ : str = logging.StreamHandler() # Set sys.stderr as stream.
snake_case__ : int = sys.stderr.flush
# Apply our default configuration to the library root logger.
snake_case__ : List[Any] = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
snake_case__ : str = False
def SCREAMING_SNAKE_CASE ( ):
global _default_handler
with _lock:
if not _default_handler:
return
snake_case__ : Optional[int] = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
snake_case__ : int = None
def SCREAMING_SNAKE_CASE ( ):
return log_levels
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[str] = None ):
if name is None:
snake_case__ : Optional[int] = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
return set_verbosity(snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
return set_verbosity(snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
return set_verbosity(snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
return set_verbosity(snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def SCREAMING_SNAKE_CASE ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def SCREAMING_SNAKE_CASE ( snake_case_ : logging.Handler ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : logging.Handler ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
_configure_library_root_logger()
snake_case__ : Optional[int] = False
def SCREAMING_SNAKE_CASE ( ):
_configure_library_root_logger()
snake_case__ : List[Any] = True
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = _get_library_root_logger().handlers
for handler in handlers:
snake_case__ : List[Any] = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Union[str, Any] = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(snake_case_ )
def SCREAMING_SNAKE_CASE ( self : List[Any] , *snake_case_ : Union[str, Any] , **snake_case_ : str ):
snake_case__ : Tuple = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , snake_case_ )
if no_advisory_warnings:
return
self.warning(*snake_case_ , **snake_case_ )
__lowerCamelCase : Optional[int] = warning_advice
@functools.lru_cache(snake_case_ )
def SCREAMING_SNAKE_CASE ( self : str , *snake_case_ : Dict , **snake_case_ : List[Any] ):
self.warning(*snake_case_ , **snake_case_ )
__lowerCamelCase : int = warning_once
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Dict , *__A : Optional[int] , **__A : Dict ): # pylint: disable=unused-argument
snake_case__ : List[Any] = args[0] if args else None
def __iter__( self : int ):
return iter(self._iterator )
def __getattr__( self : List[Any] , __A : Dict ):
def empty_fn(*__A : Optional[Any] , **__A : Union[str, Any] ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self : Optional[Any] ):
return self
def __exit__( self : Optional[int] , __A : Union[str, Any] , __A : str , __A : List[str] ):
return
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __call__( self : Any , *__A : List[str] , **__A : Dict ):
if _tqdm_active:
return tqdm_lib.tqdm(*__A , **__A )
else:
return EmptyTqdm(*__A , **__A )
def _lowercase ( self : Any , *__A : List[str] , **__A : List[Any] ):
snake_case__ : Optional[int] = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*__A , **__A )
def _lowercase ( self : int ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__lowerCamelCase : Optional[int] = _tqdm_cls()
def SCREAMING_SNAKE_CASE ( ):
global _tqdm_active
return bool(_tqdm_active )
def SCREAMING_SNAKE_CASE ( ):
global _tqdm_active
snake_case__ : Optional[int] = True
hf_hub_utils.enable_progress_bars()
def SCREAMING_SNAKE_CASE ( ):
global _tqdm_active
snake_case__ : Optional[Any] = False
hf_hub_utils.disable_progress_bars()
| 297 |
# Imports
import numpy as np
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Optional[int]=None , __A : Any=None , __A : Optional[int]=None , __A : Tuple=None , __A : List[Any]=None ):
self.set_matricies(red=__A , green=__A , blue=__A , red_edge=__A , nir=__A )
def _lowercase ( self : List[Any] , __A : Union[str, Any]=None , __A : List[str]=None , __A : Optional[Any]=None , __A : Union[str, Any]=None , __A : str=None ):
if red is not None:
snake_case__ : Dict = red
if green is not None:
snake_case__ : Tuple = green
if blue is not None:
snake_case__ : Dict = blue
if red_edge is not None:
snake_case__ : Union[str, Any] = red_edge
if nir is not None:
snake_case__ : Dict = nir
return True
def _lowercase ( self : str , __A : Any="" , __A : List[Any]=None , __A : Tuple=None , __A : Any=None , __A : Optional[Any]=None , __A : List[Any]=None ):
self.set_matricies(red=__A , green=__A , blue=__A , red_edge=__A , nir=__A )
snake_case__ : Tuple = {
"ARVI2": self.arvaa,
"CCCI": self.ccci,
"CVI": self.cvi,
"GLI": self.gli,
"NDVI": self.ndvi,
"BNDVI": self.bndvi,
"redEdgeNDVI": self.red_edge_ndvi,
"GNDVI": self.gndvi,
"GBNDVI": self.gbndvi,
"GRNDVI": self.grndvi,
"RBNDVI": self.rbndvi,
"PNDVI": self.pndvi,
"ATSAVI": self.atsavi,
"BWDRVI": self.bwdrvi,
"CIgreen": self.ci_green,
"CIrededge": self.ci_rededge,
"CI": self.ci,
"CTVI": self.ctvi,
"GDVI": self.gdvi,
"EVI": self.evi,
"GEMI": self.gemi,
"GOSAVI": self.gosavi,
"GSAVI": self.gsavi,
"Hue": self.hue,
"IVI": self.ivi,
"IPVI": self.ipvi,
"I": self.i,
"RVI": self.rvi,
"MRVI": self.mrvi,
"MSAVI": self.m_savi,
"NormG": self.norm_g,
"NormNIR": self.norm_nir,
"NormR": self.norm_r,
"NGRDI": self.ngrdi,
"RI": self.ri,
"S": self.s,
"IF": self._if,
"DVI": self.dvi,
"TVI": self.tvi,
"NDRE": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("Index not in the list!" )
return False
def _lowercase ( self : str ):
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _lowercase ( self : List[Any] ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _lowercase ( self : str ):
return self.nir * (self.red / (self.green**2))
def _lowercase ( self : List[str] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _lowercase ( self : str ):
return (self.nir - self.red) / (self.nir + self.red)
def _lowercase ( self : Tuple ):
return (self.nir - self.blue) / (self.nir + self.blue)
def _lowercase ( self : str ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _lowercase ( self : Tuple ):
return (self.nir - self.green) / (self.nir + self.green)
def _lowercase ( self : Union[str, Any] ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _lowercase ( self : Optional[int] ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _lowercase ( self : Optional[int] ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _lowercase ( self : Dict ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _lowercase ( self : int , __A : List[Any]=0.0_8 , __A : int=1.2_2 , __A : Optional[int]=0.0_3 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _lowercase ( self : str ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _lowercase ( self : Optional[Any] ):
return (self.nir / self.green) - 1
def _lowercase ( self : int ):
return (self.nir / self.redEdge) - 1
def _lowercase ( self : int ):
return (self.red - self.blue) / self.red
def _lowercase ( self : int ):
snake_case__ : List[Any] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _lowercase ( self : Any ):
return self.nir - self.green
def _lowercase ( self : List[Any] ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _lowercase ( self : List[str] ):
snake_case__ : Tuple = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _lowercase ( self : str , __A : Any=0.1_6 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def _lowercase ( self : int , __A : int=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _lowercase ( self : Optional[int] ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _lowercase ( self : int , __A : int=None , __A : Optional[Any]=None ):
return (self.nir - b) / (a * self.red)
def _lowercase ( self : str ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _lowercase ( self : List[str] ):
return (self.red + self.green + self.blue) / 3_0.5
def _lowercase ( self : Optional[Any] ):
return self.nir / self.red
def _lowercase ( self : List[str] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def _lowercase ( self : Dict ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _lowercase ( self : str ):
return self.green / (self.nir + self.red + self.green)
def _lowercase ( self : Tuple ):
return self.nir / (self.nir + self.red + self.green)
def _lowercase ( self : Tuple ):
return self.red / (self.nir + self.red + self.green)
def _lowercase ( self : str ):
return (self.green - self.red) / (self.green + self.red)
def _lowercase ( self : Tuple ):
return (self.red - self.green) / (self.red + self.green)
def _lowercase ( self : List[Any] ):
snake_case__ : str = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
snake_case__ : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _lowercase ( self : Any ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _lowercase ( self : Optional[Any] ):
return self.nir / self.red
def _lowercase ( self : Optional[Any] ):
return (self.ndvi() + 0.5) ** (1 / 2)
def _lowercase ( self : Optional[Any] ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 297 | 1 |
from typing import Dict, Optional
import numpy as np
import datasets
lowerCamelCase_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
lowerCamelCase_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
lowerCamelCase_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = None, __UpperCamelCase = False, ):
if label_map is not None:
for old_id, new_id in label_map.items():
SCREAMING_SNAKE_CASE__ =new_id
# turn into Numpy arrays
SCREAMING_SNAKE_CASE__ =np.array(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =np.array(__UpperCamelCase )
if reduce_labels:
SCREAMING_SNAKE_CASE__ =255
SCREAMING_SNAKE_CASE__ =label - 1
SCREAMING_SNAKE_CASE__ =255
SCREAMING_SNAKE_CASE__ =label != ignore_index
SCREAMING_SNAKE_CASE__ =np.not_equal(__UpperCamelCase, __UpperCamelCase )
SCREAMING_SNAKE_CASE__ =pred_label[mask]
SCREAMING_SNAKE_CASE__ =np.array(__UpperCamelCase )[mask]
SCREAMING_SNAKE_CASE__ =pred_label[pred_label == label]
SCREAMING_SNAKE_CASE__ =np.histogram(__UpperCamelCase, bins=__UpperCamelCase, range=(0, num_labels - 1) )[0]
SCREAMING_SNAKE_CASE__ =np.histogram(__UpperCamelCase, bins=__UpperCamelCase, range=(0, num_labels - 1) )[0]
SCREAMING_SNAKE_CASE__ =np.histogram(__UpperCamelCase, bins=__UpperCamelCase, range=(0, num_labels - 1) )[0]
SCREAMING_SNAKE_CASE__ =area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = None, __UpperCamelCase = False, ):
SCREAMING_SNAKE_CASE__ =np.zeros((num_labels,), dtype=np.floataa )
SCREAMING_SNAKE_CASE__ =np.zeros((num_labels,), dtype=np.floataa )
SCREAMING_SNAKE_CASE__ =np.zeros((num_labels,), dtype=np.floataa )
SCREAMING_SNAKE_CASE__ =np.zeros((num_labels,), dtype=np.floataa )
for result, gt_seg_map in zip(__UpperCamelCase, __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =intersect_and_union(
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase = None, __UpperCamelCase = None, __UpperCamelCase = False, ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =total_intersect_and_union(
__UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
# compute metrics
SCREAMING_SNAKE_CASE__ ={}
SCREAMING_SNAKE_CASE__ =total_area_intersect.sum() / total_area_label.sum()
SCREAMING_SNAKE_CASE__ =total_area_intersect / total_area_union
SCREAMING_SNAKE_CASE__ =total_area_intersect / total_area_label
SCREAMING_SNAKE_CASE__ =np.nanmean(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =np.nanmean(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =all_acc
SCREAMING_SNAKE_CASE__ =iou
SCREAMING_SNAKE_CASE__ =acc
if nan_to_num is not None:
SCREAMING_SNAKE_CASE__ ={metric: np.nan_to_num(__UpperCamelCase, nan=__UpperCamelCase ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
"""simple docstring"""
def __A ( self : Optional[Any] ) -> str:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) ,reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] ,)
def __A ( self : Tuple ,_UpperCamelCase : Union[str, Any] ,_UpperCamelCase : str ,_UpperCamelCase : int ,_UpperCamelCase : bool ,_UpperCamelCase : Optional[int] = None ,_UpperCamelCase : Optional[Dict[int, int]] = None ,_UpperCamelCase : bool = False ,) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ =mean_iou(
results=_UpperCamelCase ,gt_seg_maps=_UpperCamelCase ,num_labels=_UpperCamelCase ,ignore_index=_UpperCamelCase ,nan_to_num=_UpperCamelCase ,label_map=_UpperCamelCase ,reduce_labels=_UpperCamelCase ,)
return iou_result
| 588 |
import functools
def UpperCAmelCase_ ( __UpperCamelCase, __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ =len(__UpperCamelCase )
SCREAMING_SNAKE_CASE__ =len(__UpperCamelCase )
@functools.cache
def min_distance(__UpperCamelCase, __UpperCamelCase ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
SCREAMING_SNAKE_CASE__ =int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1, __UpperCamelCase ), 1 + min_distance(__UpperCamelCase, indexa + 1 ), diff + min_distance(indexa + 1, indexa + 1 ), )
return min_distance(0, 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 588 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 484 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def _SCREAMING_SNAKE_CASE ( *SCREAMING_SNAKE_CASE :str ) -> Union[str, Any]:
with open(SCREAMING_SNAKE_CASE , """r""" ) as fh:
fcntl.flock(SCREAMING_SNAKE_CASE , fcntl.LOCK_EX )
try:
print(*SCREAMING_SNAKE_CASE )
finally:
fcntl.flock(SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
_UpperCAmelCase = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
_UpperCAmelCase = torch.device('cuda', local_rank)
_UpperCAmelCase = socket.gethostname()
_UpperCAmelCase = f'''[{hostname}-{local_rank}]'''
try:
# test distributed
dist.init_process_group('nccl')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
_UpperCAmelCase = dist.get_rank()
_UpperCAmelCase = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise | 504 | 0 |
'''simple docstring'''
lowerCamelCase__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 713 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class _lowerCAmelCase ( __A ):
'''simple docstring'''
def __init__( self : Optional[int] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[int] ) -> None:
'''simple docstring'''
warnings.warn(
'''The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use CLIPImageProcessor instead.''' , UpperCamelCase_ , )
super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
| 411 | 0 |
"""simple docstring"""
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__lowerCAmelCase : Optional[Any] = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class a_ :
def __init__( self : List[str] , snake_case__ : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
lowerCAmelCase__ = primes[group]["""prime"""]
lowerCAmelCase__ = primes[group]["""generator"""]
lowerCAmelCase__ = int(hexlify(urandom(32 ) ) , base=16 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
return hex(self.__private_key )[2:]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
lowerCAmelCase__ = pow(self.generator , self.__private_key , self.prime )
return hex(snake_case__ )[2:]
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= key <= self.prime - 2
and pow(snake_case__ , (self.prime - 1) // 2 , self.prime ) == 1
)
def _SCREAMING_SNAKE_CASE ( self : Any , snake_case__ : str ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
if not self.is_valid_public_key(snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , self.__private_key , self.prime )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : int , snake_case__ : int ):
# check if the other public key is valid based on NIST SP800-56
return (
2 <= remote_public_key_str <= prime - 2
and pow(snake_case__ , (prime - 1) // 2 , snake_case__ ) == 1
)
@staticmethod
def _SCREAMING_SNAKE_CASE ( snake_case__ : str , snake_case__ : str , snake_case__ : int = 14 ):
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = int(snake_case__ , base=16 )
lowerCAmelCase__ = primes[group]["""prime"""]
if not DiffieHellman.is_valid_public_key_static(snake_case__ , snake_case__ ):
raise ValueError("""Invalid public key""" )
lowerCAmelCase__ = pow(snake_case__ , snake_case__ , snake_case__ )
return shaaaa(str(snake_case__ ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 644 | """simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return getitem, k
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
return setitem, k, v
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
return delitem, k
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ):
"""simple docstring"""
try:
return fun(lowerCamelCase__ , *lowerCamelCase__ ), None
except Exception as e:
return None, e
__lowerCAmelCase : Optional[Any] = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__lowerCAmelCase : Optional[int] = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__lowerCAmelCase : List[Any] = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__lowerCAmelCase : int = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__lowerCAmelCase : Optional[Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__lowerCAmelCase : Any = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def _UpperCAmelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowerCAmelCase__ = HashMap(initial_block_size=4 )
lowerCAmelCase__ = {}
for _, (fun, *args) in enumerate(lowerCamelCase__ ):
lowerCAmelCase__ , lowerCAmelCase__ = _run_operation(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ )
lowerCAmelCase__ , lowerCAmelCase__ = _run_operation(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ )
assert my_res == py_res
assert str(lowerCamelCase__ ) == str(lowerCamelCase__ )
assert set(lowerCamelCase__ ) == set(lowerCamelCase__ )
assert len(lowerCamelCase__ ) == len(lowerCamelCase__ )
assert set(my.items() ) == set(py.items() )
def _UpperCAmelCase ( ):
"""simple docstring"""
def is_public(lowerCamelCase__ ) -> bool:
return not name.startswith("""_""" )
lowerCAmelCase__ = {name for name in dir({} ) if is_public(lowerCamelCase__ )}
lowerCAmelCase__ = {name for name in dir(HashMap() ) if is_public(lowerCamelCase__ )}
assert dict_public_names > hash_public_names
| 644 | 1 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Optional[Any] = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
_lowerCamelCase : List[Any] = DatasetInfosDict.from_directory(lowercase__ )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : List[str] = str(lowercase__ )
dataset_info.write_to_directory(lowercase__ )
_lowerCamelCase : Union[str, Any] = DatasetInfo.from_directory(lowercase__ )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(lowercase__ , 'dataset_info.json' ) )
def _snake_case ( ):
_lowerCamelCase : int = DatasetInfo(
description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
_lowerCamelCase : Optional[Any] = dataset_info._to_yaml_dict()
assert sorted(lowercase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
_lowerCamelCase : Optional[Any] = yaml.safe_dump(lowercase__ )
_lowerCamelCase : str = yaml.safe_load(lowercase__ )
assert dataset_info_yaml_dict == reloaded
def _snake_case ( ):
_lowerCamelCase : str = DatasetInfo()
_lowerCamelCase : str = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=1337 ),
} ),
] , )
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : int = str(lowercase__ )
dataset_infos_dict.write_to_directory(lowercase__ )
_lowerCamelCase : List[str] = DatasetInfosDict.from_directory(lowercase__ )
# the config_name of the dataset_infos_dict take over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
_lowerCamelCase : Optional[int] = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
_lowerCamelCase : Any = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(lowercase__ , 'README.md' ) ) | 492 |
"""simple docstring"""
lowercase__ = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
lowercase__ = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
lowercase__ = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
lowercase__ = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
lowercase__ = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
lowercase__ = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
lowercase__ = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
lowercase__ = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
] | 492 | 1 |
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = ['''input_values''', '''attention_mask''']
def __init__( self : Tuple , _A : int = 1 , _A : int = 1_6000 , _A : float = 0.0 , _A : bool = False , _A : int = 80 , _A : int = 16 , _A : int = 64 , _A : str = "hann_window" , _A : float = 1.0 , _A : float = 80 , _A : float = 7600 , _A : float = 1e-10 , _A : int = 2 , _A : bool = True , **_A : Optional[Any] , ):
"""simple docstring"""
super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = do_normalize
__SCREAMING_SNAKE_CASE : Optional[Any] = return_attention_mask
__SCREAMING_SNAKE_CASE : Optional[Any] = num_mel_bins
__SCREAMING_SNAKE_CASE : Dict = hop_length
__SCREAMING_SNAKE_CASE : Any = win_length
__SCREAMING_SNAKE_CASE : Union[str, Any] = win_function
__SCREAMING_SNAKE_CASE : str = frame_signal_scale
__SCREAMING_SNAKE_CASE : Tuple = fmin
__SCREAMING_SNAKE_CASE : Any = fmax
__SCREAMING_SNAKE_CASE : Dict = mel_floor
__SCREAMING_SNAKE_CASE : Union[str, Any] = reduction_factor
__SCREAMING_SNAKE_CASE : List[str] = win_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : List[Any] = hop_length * sampling_rate // 1000
__SCREAMING_SNAKE_CASE : Union[str, Any] = optimal_fft_length(self.sample_size )
__SCREAMING_SNAKE_CASE : str = (self.n_fft // 2) + 1
__SCREAMING_SNAKE_CASE : Optional[int] = window_function(window_length=self.sample_size , name=self.win_function , periodic=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , )
if frame_signal_scale != 1.0:
warnings.warn(
'''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
if reduction_factor != 2.0:
warnings.warn(
'''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , _A , )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase__ ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ):
"""simple docstring"""
if attention_mask is not None:
__SCREAMING_SNAKE_CASE : Optional[int] = np.array(_A , np.intaa )
__SCREAMING_SNAKE_CASE : List[Any] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
__SCREAMING_SNAKE_CASE : Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
__SCREAMING_SNAKE_CASE : Any = padding_value
normed_input_values.append(_A )
else:
__SCREAMING_SNAKE_CASE : int = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def UpperCAmelCase__ ( self : Any , _A : np.ndarray , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = spectrogram(
_A , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , )
return log_mel_spec.T
def __call__( self : Dict , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , **_A : str , ):
"""simple docstring"""
if audio is None and audio_target is None:
raise ValueError('''You must provide either `audio` or `audio_target` values.''' )
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the ``sampling_rate`` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if audio is not None:
__SCREAMING_SNAKE_CASE : str = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
if audio_target is not None:
__SCREAMING_SNAKE_CASE : List[Any] = self._process_audio(
_A , _A , _A , _A , _A , _A , _A , _A , **_A , )
if inputs is None:
return inputs_target
else:
__SCREAMING_SNAKE_CASE : str = inputs_target['''input_values''']
__SCREAMING_SNAKE_CASE : Dict = inputs_target.get('''attention_mask''' )
if decoder_attention_mask is not None:
__SCREAMING_SNAKE_CASE : Tuple = decoder_attention_mask
return inputs
def UpperCAmelCase__ ( self : Tuple , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = False , _A : Union[bool, str, PaddingStrategy] = False , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[bool] = None , _A : Optional[Union[str, TensorType]] = None , **_A : str , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = isinstance(_A , np.ndarray ) and len(speech.shape ) > 1
if is_batched_numpy and len(speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
__SCREAMING_SNAKE_CASE : int = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__SCREAMING_SNAKE_CASE : Tuple = [np.asarray(_A , dtype=np.floataa ) for speech in speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
__SCREAMING_SNAKE_CASE : Any = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and speech.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : Tuple = speech.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE : Optional[int] = [speech]
# needed to make pad() work on spectrogram inputs
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.feature_size
# convert into correct format for padding
if is_target:
__SCREAMING_SNAKE_CASE : Tuple = [self._extract_mel_features(_A ) for waveform in speech]
__SCREAMING_SNAKE_CASE : Tuple = BatchFeature({'''input_values''': features} )
__SCREAMING_SNAKE_CASE : Any = self.num_mel_bins
else:
__SCREAMING_SNAKE_CASE : Dict = BatchFeature({'''input_values''': speech} )
__SCREAMING_SNAKE_CASE : Dict = self.pad(
_A , padding=_A , max_length=_A , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=_A , **_A , )
__SCREAMING_SNAKE_CASE : List[Any] = feature_size_hack
# convert input values to correct format
__SCREAMING_SNAKE_CASE : str = padded_inputs['''input_values''']
if not isinstance(input_values[0] , np.ndarray ):
__SCREAMING_SNAKE_CASE : Any = [np.asarray(_A , dtype=np.floataa ) for array in input_values]
elif (
not isinstance(_A , np.ndarray )
and isinstance(input_values[0] , np.ndarray )
and input_values[0].dtype is np.dtype(np.floataa )
):
__SCREAMING_SNAKE_CASE : List[Any] = [array.astype(np.floataa ) for array in input_values]
elif isinstance(_A , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE : Any = input_values.astype(np.floataa )
# convert attention_mask to correct format
__SCREAMING_SNAKE_CASE : List[str] = padded_inputs.get('''attention_mask''' )
if attention_mask is not None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(_A , dtype=np.intaa ) for array in attention_mask]
# zero-mean and unit-variance normalization
if not is_target and self.do_normalize:
__SCREAMING_SNAKE_CASE : Optional[Any] = (
attention_mask
if self._get_padding_strategies(_A , max_length=_A ) is not PaddingStrategy.DO_NOT_PAD
else None
)
__SCREAMING_SNAKE_CASE : List[str] = self.zero_mean_unit_var_norm(
padded_inputs['''input_values'''] , attention_mask=_A , padding_value=self.padding_value )
if return_tensors is not None:
__SCREAMING_SNAKE_CASE : str = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = super().to_dict()
# Don't serialize these as they are derived from the other properties.
__SCREAMING_SNAKE_CASE : int = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs''']
for name in names:
if name in output:
del output[name]
return output
| 74 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE_ = 1
SCREAMING_SNAKE_CASE_ = 3
SCREAMING_SNAKE_CASE_ = (32, 32)
SCREAMING_SNAKE_CASE_ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowerCAmelCase )
return image
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
def lowerCAmelCase_ ( self : Tuple ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def lowerCAmelCase_ ( self : Optional[int] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(_lowerCAmelCase )
@property
def lowerCAmelCase_ ( self : List[Any] ):
def extract(*_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : str ):
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self : str ):
SCREAMING_SNAKE_CASE_ = torch.ones([0] )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : int ):
self.pixel_values.to(_lowerCAmelCase )
return self
return Out()
return extract
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , return_dict=_lowerCAmelCase , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : Tuple ):
SCREAMING_SNAKE_CASE_ = self.dummy_cond_unet
SCREAMING_SNAKE_CASE_ = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.dummy_vae
SCREAMING_SNAKE_CASE_ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE_ = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
SCREAMING_SNAKE_CASE_ = 77
SCREAMING_SNAKE_CASE_ = self.dummy_image.to(_lowerCAmelCase )
# put models in fp16
SCREAMING_SNAKE_CASE_ = unet.half()
SCREAMING_SNAKE_CASE_ = vae.half()
SCREAMING_SNAKE_CASE_ = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE_ = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = alt_pipe.to(_lowerCAmelCase )
alt_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = 'A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = alt_pipe(
[prompt] , generator=_lowerCAmelCase , num_inference_steps=2 , output_type='np' , image=_lowerCAmelCase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def lowerCAmelCase_ ( self : str ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE_ = init_image.resize((760, 504) )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
SCREAMING_SNAKE_CASE_ = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
SCREAMING_SNAKE_CASE_ = init_image.resize((768, 512) )
SCREAMING_SNAKE_CASE_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy' )
SCREAMING_SNAKE_CASE_ = 'BAAI/AltDiffusion'
SCREAMING_SNAKE_CASE_ = AltDiffusionImgaImgPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE_ = 'A fantasy landscape, trending on artstation'
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , strength=0.75 , guidance_scale=7.5 , generator=_lowerCAmelCase , output_type='np' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2 | 31 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : List[Any] ) -> int:
'''simple docstring'''
A__ : Any =tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cummulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
A__ : Optional[int] =tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
A__ : int =tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023] , dtype=tf.floataa , ) # expected non filtered values as noted above
A__ : Union[str, Any] =tf_top_k_top_p_filtering(_a , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
A__ : Tuple =output[output != -float("""inf""" )]
A__ : List[Any] =tf.cast(
tf.where(tf.not_equal(_a , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(_a , _a , rtol=1e-12 )
tf.debugging.assert_equal(_a , _a )
@require_tf
class lowerCamelCase ( unittest.TestCase , UpperCamelCase__ ):
'''simple docstring'''
if is_tf_available():
__snake_case = {
'AutoModelForCausalLM': TFAutoModelForCausalLM,
'AutoModelForSpeechSeq2Seq': TFAutoModelForSpeechSeqaSeq,
'AutoModelForSeq2SeqLM': TFAutoModelForSeqaSeqLM,
'AutoModelForVision2Seq': TFAutoModelForVisionaSeq,
'LogitsProcessorList': TFLogitsProcessorList,
'MinLengthLogitsProcessor': TFMinLengthLogitsProcessor,
'create_tensor_fn': tf.convert_to_tensor,
'floats_tensor': floats_tensor,
'return_tensors': 'tf',
}
@slow
def lowercase__ ( self : str ) -> List[Any]:
'''simple docstring'''
# TF-only test: tf.saved_model export
A__ : Optional[Any] =TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A__ : int =2
A__ : int =2
class lowerCamelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : Dict , lowerCAmelCase_ : str ) -> List[str]:
'''simple docstring'''
super(_a , self ).__init__()
A__ : int =model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_a , )
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str] ) -> Any:
'''simple docstring'''
A__ : Any =self.model.generate(
input_ids=_a , attention_mask=_a , max_new_tokens=_a , return_dict_in_generate=_a , )
return {"sequences": outputs["sequences"]}
A__ : int =[[2, 0], [1_02, 1_03]]
A__ : Any =[[1, 0], [1, 1]]
A__ : Optional[int] =DummyModel(model=_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_a , _a , signatures={"""serving_default""": dummy_model.serving} )
A__ : Union[str, Any] =tf.saved_model.load(_a ).signatures["""serving_default"""]
for batch_size in range(1 , len(_a ) + 1 ):
A__ : List[Any] ={
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
A__ : List[Any] =serving_func(**_a )["""sequences"""]
A__ : List[Any] =test_model.generate(**_a , max_new_tokens=_a )
tf.debugging.assert_equal(_a , _a )
@slow
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
# TF-only test: tf.saved_model export
A__ : Tuple =TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A__ : Union[str, Any] =1
A__ : str =2
class lowerCamelCase ( tf.Module ):
'''simple docstring'''
def __init__( self : Tuple , lowerCAmelCase_ : str ) -> str:
'''simple docstring'''
super(_a , self ).__init__()
A__ : Optional[Any] =model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=_a , )
def lowercase__ ( self : str , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] ) -> Any:
'''simple docstring'''
A__ : Tuple =self.model.generate(
input_ids=_a , attention_mask=_a , max_new_tokens=_a , return_dict_in_generate=_a , )
return {"sequences": outputs["sequences"]}
A__ : Optional[Any] =[[2], [1_02, 1_03]]
A__ : List[Any] =[[1], [1, 1]]
A__ : List[Any] =DummyModel(model=_a )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(_a , _a , signatures={"""serving_default""": dummy_model.serving} )
A__ : Optional[int] =tf.saved_model.load(_a ).signatures["""serving_default"""]
for input_row in range(len(_a ) ):
A__ : List[str] ={
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
A__ : int =serving_func(**_a )["""sequences"""]
A__ : str =test_model.generate(**_a , max_new_tokens=_a )
tf.debugging.assert_equal(_a , _a )
@slow
@require_tensorflow_text
def lowercase__ ( self : Dict ) -> Tuple:
'''simple docstring'''
# TF-only test: tf.saved_model export
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=_a )
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
super().__init__()
A__ : str =text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(_a , """spiece.model""" ) , """rb""" ).read() )
A__ : str =TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : Tuple , *lowerCAmelCase_ : int , **lowerCAmelCase_ : List[Any] ) -> str:
'''simple docstring'''
A__ : Any =self.tokenizer.tokenize(_a )
A__ : Any =text.pad_model_inputs(
_a , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
A__ : Any =self.model.generate(input_ids=_a , attention_mask=_a )
return self.tokenizer.detokenize(_a )
A__ : Optional[int] =CompleteSentenceTransformer()
A__ : Dict =tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
A__ : List[str] =complete_model(_a )
A__ : List[Any] =tf.keras.Model(_a , _a )
keras_model.save(_a )
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
# Has PT equivalent: this test relies on random sampling
A__ : Optional[int] ={
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 10,
"""temperature""": 0.7,
}
A__ : Any =14
A__ : Optional[int] =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A__ : str ="""Hello, my dog is cute and"""
A__ : Optional[Any] =tokenizer(_a , return_tensors="""tf""" )
A__ : List[str] =TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
A__ : Optional[Any] =6_38
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
A__ : Dict =model.generate(**_a , eos_token_id=_a , **_a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
A__ : int =[6_38, 1_98]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
A__ : int =model.generate(**_a , eos_token_id=_a , **_a )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
# Has PT equivalent: ample use of framework-specific code
A__ : Any =AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A__ : Tuple ="""Hugging Face is a technology company based in New York and Paris."""
A__ : Union[str, Any] =bart_tokenizer(_a , return_tensors="""tf""" ).input_ids
A__ : Dict =TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A__ : Optional[Any] =bart_model.generate(_a ).numpy()
class lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
def lowercase__ ( self : Union[str, Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[str]=None , **lowerCAmelCase_ : Optional[Any] ) -> Dict:
'''simple docstring'''
return super().call(_a , **_a )
A__ : Union[str, Any] =FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
A__ : List[str] =bart_model.generate(_a , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(_a , _a ) )
class lowerCamelCase ( bart_model.model.encoder.__class__ ):
'''simple docstring'''
def lowercase__ ( self : str , lowerCAmelCase_ : Any , **lowerCAmelCase_ : Dict ) -> Union[str, Any]:
'''simple docstring'''
return super().call(_a , **_a )
A__ : Optional[int] =FakeEncoder(bart_model.config , bart_model.model.shared )
A__ : Optional[int] =fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
A__ : Optional[int] =bart_model.generate(_a ).numpy()
with self.assertRaises(_a ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(_a , foo="""bar""" )
| 710 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def __lowerCamelCase ( __snake_case : Tuple, __snake_case : List[Any] ) -> str:
"""simple docstring"""
A__ : Optional[int] =[]
for part_id in partition_order:
A__ : int =df.where(f"SPARK_PARTITION_ID() = {part_id}" ).collect()
for row_idx, row in enumerate(__snake_case ):
expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : str =spark.range(100 ).repartition(1 )
A__ : List[str] =Spark(__snake_case )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Tuple:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Tuple =spark.range(10 ).repartition(2 )
A__ : List[str] =[1, 0]
A__ : Tuple =_generate_iterable_examples(__snake_case, __snake_case ) # Reverse the partitions.
A__ : Dict =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, __snake_case )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A__ , A__ : Union[str, Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> List[Any]:
"""simple docstring"""
A__ : Any =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(10 ).repartition(1 )
A__ : List[str] =SparkExamplesIterable(__snake_case )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(__snake_case ):
assert row_id == f"0_{i}"
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : List[str] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Union[str, Any] =spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
A__ : Tuple =lambda __snake_case : x.reverse()
A__ : List[str] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [2, 1, 0] )
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shuffle_data_sources(__snake_case )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : List[Any] =expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
A__ : List[Any] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : Any =spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A__ : str =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=0, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Any =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [0, 2] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Dict =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A__ : Union[str, Any] =SparkExamplesIterable(__snake_case ).shard_data_sources(worker_id=1, num_workers=2 )
assert shard_it_a.n_shards == 2
A__ : Union[str, Any] =_get_expected_row_ids_and_row_dicts_for_partition_order(__snake_case, [1, 3] )
for i, (row_id, row_dict) in enumerate(__snake_case ):
A__ , A__ : Optional[int] =expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
A__ : Optional[int] =pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
A__ : List[str] =spark.range(100 ).repartition(1 )
A__ : List[Any] =Spark(__snake_case )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 687 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 662 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = torch.nn.Linear(10 , 10 )
__lowercase = torch.optim.SGD(model.parameters() , 0.1 )
__lowercase = Accelerator()
__lowercase = accelerator.prepare(_lowerCAmelCase )
try:
pickle.loads(pickle.dumps(_lowerCAmelCase ) )
except Exception as e:
self.fail(F'Accelerated optimizer pickling failed with {e}' )
AcceleratorState._reset_state()
| 80 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'realm'
def __init__( self , A=30522 , A=768 , A=128 , A=12 , A=12 , A=8 , A=3072 , A="gelu_new" , A=0.1 , A=0.1 , A=512 , A=2 , A=0.0_2 , A=1e-12 , A=256 , A=10 , A=1e-3 , A=5 , A=320 , A=13353718 , A=5000 , A=1 , A=0 , A=2 , **A , ) -> Tuple:
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
# Common config
UpperCAmelCase : Optional[Any] = vocab_size
UpperCAmelCase : Optional[int] = max_position_embeddings
UpperCAmelCase : Any = hidden_size
UpperCAmelCase : str = retriever_proj_size
UpperCAmelCase : List[Any] = num_hidden_layers
UpperCAmelCase : Optional[Any] = num_attention_heads
UpperCAmelCase : Any = num_candidates
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : Dict = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase : Tuple = initializer_range
UpperCAmelCase : Tuple = type_vocab_size
UpperCAmelCase : Any = layer_norm_eps
# Reader config
UpperCAmelCase : Union[str, Any] = span_hidden_size
UpperCAmelCase : List[Any] = max_span_width
UpperCAmelCase : str = reader_layer_norm_eps
UpperCAmelCase : Tuple = reader_beam_size
UpperCAmelCase : Any = reader_seq_len
# Retrieval config
UpperCAmelCase : Tuple = num_block_records
UpperCAmelCase : Dict = searcher_beam_size
| 706 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 672 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 3 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , lowercase_ , lowercase_=1_3 , lowercase_=7 , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=False , lowercase_=2 , lowercase_=9_9 , lowercase_=0 , lowercase_=3_2 , lowercase_=5 , lowercase_=4 , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=1_2 , lowercase_=2 , lowercase_=0.0_2 , lowercase_=3 , lowercase_=4 , lowercase_="last" , lowercase_=None , lowercase_=None , ) -> str:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_lengths
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = gelu_activation
UpperCAmelCase = sinusoidal_embeddings
UpperCAmelCase = causal
UpperCAmelCase = asm
UpperCAmelCase = n_langs
UpperCAmelCase = vocab_size
UpperCAmelCase = n_special
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = summary_type
UpperCAmelCase = use_proj
UpperCAmelCase = scope
def a_ ( self ) -> int:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
if self.use_input_lengths:
UpperCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a_ ( self ) -> Union[str, Any]:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def a_ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
UpperCAmelCase = FlaubertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , lengths=lowercase_ , langs=lowercase_ )
UpperCAmelCase = model(lowercase_ , langs=lowercase_ )
UpperCAmelCase = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Any:
UpperCAmelCase = FlaubertWithLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
UpperCAmelCase = FlaubertForQuestionAnsweringSimple(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ )
UpperCAmelCase = model(lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> int:
UpperCAmelCase = FlaubertForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ )
UpperCAmelCase = model(
lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , cls_index=lowercase_ , is_impossible=lowercase_ , p_mask=lowercase_ , )
UpperCAmelCase = model(
lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , cls_index=lowercase_ , is_impossible=lowercase_ , )
((UpperCAmelCase) , ) = result_with_labels.to_tuple()
UpperCAmelCase = model(lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
((UpperCAmelCase) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def a_ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Dict:
UpperCAmelCase = FlaubertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ )
UpperCAmelCase = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a_ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
UpperCAmelCase = self.num_labels
UpperCAmelCase = FlaubertForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> Tuple:
UpperCAmelCase = self.num_choices
UpperCAmelCase = FlaubertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = config_and_inputs
UpperCAmelCase = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : str = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def a_ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a_ ( self , lowercase_ , lowercase_ , lowercase_=False ) -> List[str]:
UpperCAmelCase = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def a_ ( self ) -> List[str]:
UpperCAmelCase = FlaubertModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowercase_ , emb_dim=3_7 )
def a_ ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def a_ ( self ) -> Dict:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase_ )
def a_ ( self ) -> int:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase_ )
def a_ ( self ) -> Any:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase_ )
def a_ ( self ) -> str:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase_ )
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase_ )
def a_ ( self ) -> Tuple:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase_ )
def a_ ( self ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase_ )
@slow
def a_ ( self ) -> List[str]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = FlaubertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
@require_torch_gpu
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
UpperCAmelCase = True
UpperCAmelCase = model_class(config=lowercase_ )
UpperCAmelCase = self._prepare_for_class(lowercase_ , lowercase_ )
UpperCAmelCase = torch.jit.trace(
lowercase_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase_ , os.path.join(lowercase_ , 'traced_model.pt' ) )
UpperCAmelCase = torch.jit.load(os.path.join(lowercase_ , 'traced_model.pt' ) , map_location=lowercase_ )
loaded(inputs_dict['input_ids'].to(lowercase_ ) , inputs_dict['attention_mask'].to(lowercase_ ) )
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
@slow
def a_ ( self ) -> List[Any]:
UpperCAmelCase = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
UpperCAmelCase = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
with torch.no_grad():
UpperCAmelCase = model(lowercase_ )[0]
UpperCAmelCase = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , lowercase_ )
UpperCAmelCase = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
| 373 | 0 |
from __future__ import annotations
from typing import Any
def A__ ( __lowerCamelCase ):
create_state_space_tree(lowercase_, [], 0 )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if index == len(lowercase_ ):
print(lowercase_ )
return
create_state_space_tree(lowercase_, lowercase_, index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(lowercase_, lowercase_, index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__UpperCAmelCase = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 720 |
from PIL import Image
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = image.size
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = image.load()
for i in range(__lowerCamelCase ):
for j in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(__lowerCamelCase ):
for i in range(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = 2_55 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
__UpperCAmelCase = mean_threshold(Image.open("path_to_image").convert("L"))
image.save("output_image_path")
| 597 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase :Optional[int] = logging.get_logger(__name__)
def __snake_case ( _UpperCamelCase ) -> Optional[int]:
_a = DPTConfig()
if "large" in checkpoint_url:
_a = 10_24
_a = 40_96
_a = 24
_a = 16
_a = [5, 11, 17, 23]
_a = [2_56, 5_12, 10_24, 10_24]
_a = (1, 3_84, 3_84)
if "ade" in checkpoint_url:
_a = True
_a = 1_50
_a = '''huggingface/label-files'''
_a = '''ade20k-id2label.json'''
_a = json.load(open(cached_download(hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type='''dataset''' ) ) , '''r''' ) )
_a = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
_a = [1, 1_50, 4_80, 4_80]
return config, expected_shape
def __snake_case ( _UpperCamelCase ) -> List[str]:
_a = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def __snake_case ( _UpperCamelCase ) -> Optional[int]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_a = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
_a = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
_a = name.replace('''patch_embed''' , '''patch_embeddings''' )
if "pos_embed" in name:
_a = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
_a = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
_a = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
_a = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
_a = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_a = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name:
_a = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
_a = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
_a = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
_a = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
_a = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
_a = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
_a = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
_a = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
_a = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_a = name.replace(f"refinenet{layer_idx}" , f"fusion_stage.layers.{abs(layer_idx-4 )}" )
if "out_conv" in name:
_a = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
_a = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
_a = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
_a = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
_a = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_a = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
_a = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
_a = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
_a = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_a = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
_a = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
_a = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
_a = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
_a = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
_a = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
_a = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
_a = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
_a = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
_a = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
_a = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
_a = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
return name
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> Tuple:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_a = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight" )
_a = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_a = in_proj_weight[: config.hidden_size, :]
_a = in_proj_bias[: config.hidden_size]
_a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_a = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_a = in_proj_weight[
-config.hidden_size :, :
]
_a = in_proj_bias[-config.hidden_size :]
def __snake_case ( ) -> Any:
_a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def __snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> int:
_a , _a = get_dpt_config(_UpperCamelCase )
# load original state_dict from URL
_a = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='''cpu''' )
# remove certain keys
remove_ignore_keys_(_UpperCamelCase )
# rename keys
for key in state_dict.copy().keys():
_a = state_dict.pop(_UpperCamelCase )
_a = val
# read in qkv matrices
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase )
# load HuggingFace model
_a = DPTForSemanticSegmentation(_UpperCamelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(_UpperCamelCase )
model.load_state_dict(_UpperCamelCase )
model.eval()
# Check outputs on an image
_a = 4_80 if '''ade''' in checkpoint_url else 3_84
_a = DPTImageProcessor(size=_UpperCamelCase )
_a = prepare_img()
_a = image_processor(_UpperCamelCase , return_tensors='''pt''' )
# forward pass
_a = model(**_UpperCamelCase ).logits if '''ade''' in checkpoint_url else model(**_UpperCamelCase ).predicted_depth
# Assert logits
_a = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
if "ade" in checkpoint_url:
_a = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
assert outputs.shape == torch.Size(_UpperCamelCase )
assert (
torch.allclose(outputs[0, 0, :3, :3] , _UpperCamelCase , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , _UpperCamelCase )
)
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
print(f"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
print('''Pushing model to hub...''' )
model.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=_UpperCamelCase , )
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=_UpperCamelCase , )
if __name__ == "__main__":
lowerCamelCase :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
lowerCamelCase :Tuple = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 487 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __snake_case ( _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = math.inf , _UpperCamelCase = -math.inf , _UpperCamelCase = math.inf , _UpperCamelCase = -math.inf , _UpperCamelCase = False , _UpperCamelCase = 1_00 , _UpperCamelCase = 0.01 , _UpperCamelCase = 1 , ) -> Any:
_a = False
_a = search_prob
_a = start_temperate
_a = []
_a = 0
_a = None
while not search_end:
_a = current_state.score()
if best_state is None or current_score > best_state.score():
_a = current_state
scores.append(_UpperCamelCase )
iterations += 1
_a = None
_a = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_a = random.randint(0 , len(_UpperCamelCase ) - 1 ) # picking a random neighbor
_a = neighbors.pop(_UpperCamelCase )
_a = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_a = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_a = picked_neighbor
else:
_a = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_a = picked_neighbor
_a = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_a = True
else:
_a = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(_UpperCamelCase ) , _UpperCamelCase )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> int:
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCamelCase :Optional[int] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCamelCase :List[str] = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
# starting the problem with initial coordinates (12, 47)
lowerCamelCase :Dict = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
lowerCamelCase :int = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F'''and 50 > y > - 5 found via hill climbing: {local_min.score()}'''
)
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
return (3 * x**2) - (6 * y)
lowerCamelCase :Optional[int] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCamelCase :List[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F'''{local_min.score()}'''
)
lowerCamelCase :Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCamelCase :Union[str, Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F'''{local_min.score()}'''
)
| 487 | 1 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
__snake_case :List[Any] =logging.get_logger(__name__)
__snake_case :Optional[int] ='▁'
__snake_case :List[Any] ={
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
__snake_case :Optional[Any] ={
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
__snake_case :Dict ={
'facebook/m2m100_418M': 1024,
}
# fmt: off
__snake_case :Optional[Any] ={
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : Any = VOCAB_FILES_NAMES
A_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A_ : Optional[int] = ['input_ids', 'attention_mask']
A_ : List[int] = []
A_ : List[int] = []
def __init__( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=None , __UpperCamelCase : List[str]="<s>" , __UpperCamelCase : int="</s>" , __UpperCamelCase : Tuple="</s>" , __UpperCamelCase : Dict="<pad>" , __UpperCamelCase : int="<unk>" , __UpperCamelCase : str="m2m100" , __UpperCamelCase : Optional[Dict[str, Any]] = None , __UpperCamelCase : Dict=8 , **__UpperCamelCase : Optional[int] , ) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
A = language_codes
A = FAIRSEQ_LANGUAGE_CODES[language_codes]
A = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
A = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__UpperCamelCase )
for lang_code in fairseq_language_code
if self.get_lang_token(__UpperCamelCase ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , language_codes=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__UpperCamelCase , **__UpperCamelCase , )
A = vocab_file
A = load_json(__UpperCamelCase )
A = {v: k for k, v in self.encoder.items()}
A = spm_file
A = load_spm(__UpperCamelCase , self.sp_model_kwargs )
A = len(self.encoder )
A = {
self.get_lang_token(__UpperCamelCase ): self.encoder_size + i for i, lang_code in enumerate(__UpperCamelCase )
}
A = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__UpperCamelCase )}
A = {v: k for k, v in self.lang_token_to_id.items()}
A = src_lang if src_lang is not None else 'en'
A = tgt_lang
A = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
A = num_madeup_words
@property
def __UpperCamelCase ( self : Tuple ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self : Optional[int] ) -> str:
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str ) -> None:
A = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self : List[str] , __UpperCamelCase : str ) -> List[str]:
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def __UpperCamelCase ( self : int , __UpperCamelCase : Any ) -> List[str]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__UpperCamelCase , self.encoder[self.unk_token] )
def __UpperCamelCase ( self : int , __UpperCamelCase : int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__UpperCamelCase , self.unk_token )
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Dict ) -> Dict:
A = []
A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__UpperCamelCase ) + token
A = []
else:
current_sub_tokens.append(__UpperCamelCase )
out_string += self.sp_model.decode(__UpperCamelCase )
return out_string.strip()
def __UpperCamelCase ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
A = [1] * len(self.prefix_tokens )
A = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__UpperCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones
def __UpperCamelCase ( self : str , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self : str ) -> Dict:
A = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Dict:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Optional[int] , __UpperCamelCase : Dict ) -> None:
A = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
A = {}
A = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
A = Path(__UpperCamelCase )
if not save_dir.is_dir():
raise OSError(f'''{save_directory} should be a directory''' )
A = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
A = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , __UpperCamelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __UpperCamelCase )
elif not os.path.isfile(self.spm_file ):
with open(__UpperCamelCase , 'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (str(__UpperCamelCase ), str(__UpperCamelCase ))
def __UpperCamelCase ( self : str , __UpperCamelCase : List[str] , __UpperCamelCase : str = "en" , __UpperCamelCase : Optional[List[str]] = None , __UpperCamelCase : str = "ro" , **__UpperCamelCase : Any , ) -> BatchEncoding:
A = src_lang
A = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase )
def __UpperCamelCase ( self : int , __UpperCamelCase : Dict , __UpperCamelCase : Optional[str] , __UpperCamelCase : Optional[str] , **__UpperCamelCase : str ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
A = src_lang
A = self(__UpperCamelCase , add_special_tokens=__UpperCamelCase , **__UpperCamelCase )
A = self.get_lang_id(__UpperCamelCase )
A = tgt_lang_id
return inputs
def __UpperCamelCase ( self : int ) -> Union[str, Any]:
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str ) -> None:
A = self.get_lang_token(__UpperCamelCase )
A = self.lang_token_to_id[lang_token]
A = [self.cur_lang_id]
A = [self.eos_token_id]
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str ) -> None:
A = self.get_lang_token(__UpperCamelCase )
A = self.lang_token_to_id[lang_token]
A = [self.cur_lang_id]
A = [self.eos_token_id]
def __UpperCamelCase ( self : Dict , __UpperCamelCase : str ) -> str:
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : str ) -> int:
A = self.get_lang_token(__UpperCamelCase )
return self.lang_token_to_id[lang_token]
def lowerCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
'''simple docstring'''
A = sentencepiece.SentencePieceProcessor(**lowerCAmelCase__ )
spm.Load(str(lowerCAmelCase__ ) )
return spm
def lowerCamelCase_ ( lowerCAmelCase__ : str ) -> Union[Dict, List]:
'''simple docstring'''
with open(lowerCAmelCase__ , 'r' ) as f:
return json.load(lowerCAmelCase__ )
def lowerCamelCase_ ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ) -> None:
'''simple docstring'''
with open(lowerCAmelCase__ , 'w' ) as f:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ , indent=2 ) | 224 |
from __future__ import annotations
def lowerCamelCase_ ( lowerCAmelCase__ : list[float] ) -> bool:
'''simple docstring'''
if len(lowerCAmelCase__ ) < 2:
raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
if any(i <= 0 for i in nums ):
raise ValueError('All values must be greater than 0' )
A = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod() | 224 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : Any = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 633 |
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
__snake_case : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class A ( a ):
def __init__( self , snake_case_ , snake_case_=7_6_8 ) -> Optional[int]:
super().__init__(snake_case_ )
_a = proj_size
_a = CLIPVisionModel(snake_case_ )
_a = PaintByExampleMapper(snake_case_ )
_a = nn.LayerNorm(config.hidden_size )
_a = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
_a = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def __lowerCAmelCase ( self , snake_case_ , snake_case_=False ) -> Any:
_a = self.model(pixel_values=snake_case_ )
_a = clip_output.pooler_output
_a = self.mapper(latent_states[:, None] )
_a = self.final_layer_norm(snake_case_ )
_a = self.proj_out(snake_case_ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class A ( nn.Module ):
def __init__( self , snake_case_ ) -> Tuple:
super().__init__()
_a = (config.num_hidden_layers + 1) // 5
_a = config.hidden_size
_a = 1
_a = nn.ModuleList(
[
BasicTransformerBlock(snake_case_ , snake_case_ , snake_case_ , activation_fn="gelu" , attention_bias=snake_case_ )
for _ in range(snake_case_ )
] )
def __lowerCAmelCase ( self , snake_case_ ) -> Tuple:
for block in self.blocks:
_a = block(snake_case_ )
return hidden_states
| 131 | 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self: Optional[int] , __lowerCAmelCase: List[Any] , __lowerCAmelCase: str=13 , __lowerCAmelCase: Optional[int]=32 , __lowerCAmelCase: int=2 , __lowerCAmelCase: List[str]=3 , __lowerCAmelCase: Optional[Any]=16 , __lowerCAmelCase: str=[1, 2, 1] , __lowerCAmelCase: Tuple=[2, 2, 4] , __lowerCAmelCase: Union[str, Any]=2 , __lowerCAmelCase: str=2.0 , __lowerCAmelCase: Tuple=True , __lowerCAmelCase: Optional[Any]=0.0 , __lowerCAmelCase: Any=0.0 , __lowerCAmelCase: Tuple=0.1 , __lowerCAmelCase: Optional[Any]="gelu" , __lowerCAmelCase: int=False , __lowerCAmelCase: List[str]=True , __lowerCAmelCase: List[Any]=0.02 , __lowerCAmelCase: List[str]=1E-5 , __lowerCAmelCase: str=True , __lowerCAmelCase: int=None , __lowerCAmelCase: str=True , __lowerCAmelCase: Optional[Any]=10 , __lowerCAmelCase: Optional[int]=8 , __lowerCAmelCase: Optional[int]=["stage1", "stage2", "stage3"] , __lowerCAmelCase: Any=[1, 2, 3] , ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = embed_dim
__UpperCAmelCase = depths
__UpperCAmelCase = num_heads
__UpperCAmelCase = window_size
__UpperCAmelCase = mlp_ratio
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = hidden_act
__UpperCAmelCase = use_absolute_embeddings
__UpperCAmelCase = patch_norm
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = initializer_range
__UpperCAmelCase = is_training
__UpperCAmelCase = scope
__UpperCAmelCase = use_labels
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = encoder_stride
__UpperCAmelCase = out_features
__UpperCAmelCase = out_indices
def _UpperCAmelCase ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self: Dict ) -> List[Any]:
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _UpperCAmelCase ( self: Dict , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: Any , __lowerCAmelCase: List[str] ) -> int:
'''simple docstring'''
__UpperCAmelCase = MaskFormerSwinModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase )
__UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self: Dict , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: int ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = MaskFormerSwinBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(__lowerCAmelCase ):
__UpperCAmelCase = ["stem"]
__UpperCAmelCase = MaskFormerSwinBackbone(config=__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[Any] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = config_and_inputs
__UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Any = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Tuple = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : List[Any] = False
def _UpperCAmelCase ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = MaskFormerSwinModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def _UpperCAmelCase ( self: str ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase ( self: Any ) -> Union[str, Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self: str ) -> int:
'''simple docstring'''
return
def _UpperCAmelCase ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCAmelCase )
@unittest.skip("Swin does not use inputs_embeds" )
def _UpperCAmelCase ( self: Any ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip("Swin does not support feedforward chunking" )
def _UpperCAmelCase ( self: str ) -> Tuple:
'''simple docstring'''
pass
def _UpperCAmelCase ( self: Any ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(__lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCAmelCase , nn.Linear ) )
def _UpperCAmelCase ( self: Any ) -> str:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(__lowerCAmelCase )
__UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase = [*signature.parameters.keys()]
__UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def _UpperCAmelCase ( self: Optional[int] ) -> int:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def _UpperCAmelCase ( self: str ) -> Dict:
'''simple docstring'''
pass
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: Tuple , __lowerCAmelCase: Dict , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Any ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
__UpperCAmelCase = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
__UpperCAmelCase = outputs.hidden_states
__UpperCAmelCase = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
# Swin has a different seq_length
__UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _UpperCAmelCase ( self: Optional[Any] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__UpperCAmelCase = True
self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[int] ) -> Any:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = 3
__UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__UpperCAmelCase = True
self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase = True
self.check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def _UpperCAmelCase ( self: Optional[Any] ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _UpperCAmelCase ( self: List[Any] ) -> List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _UpperCAmelCase ( self: str ) -> List[str]:
'''simple docstring'''
pass
def _UpperCAmelCase ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__lowerCAmelCase: Optional[Any] ):
__UpperCAmelCase = 0
return t
def check_equivalence(__lowerCAmelCase: str , __lowerCAmelCase: str , __lowerCAmelCase: int , __lowerCAmelCase: int={} ):
with torch.no_grad():
__UpperCAmelCase = model(**__lowerCAmelCase , return_dict=__lowerCAmelCase , **__lowerCAmelCase )
__UpperCAmelCase = model(**__lowerCAmelCase , return_dict=__lowerCAmelCase , **__lowerCAmelCase ).to_tuple()
def recursive_check(__lowerCAmelCase: int , __lowerCAmelCase: List[Any] ):
if isinstance(__lowerCAmelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__lowerCAmelCase , __lowerCAmelCase ):
recursive_check(__lowerCAmelCase , __lowerCAmelCase )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(__lowerCAmelCase , __lowerCAmelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__lowerCAmelCase ) , set_nan_tensor_to_zero(__lowerCAmelCase ) , atol=1E-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
F''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
F''' {torch.isnan(__lowerCAmelCase ).any()} and `inf`: {torch.isinf(__lowerCAmelCase )}. Dict has'''
F''' `nan`: {torch.isnan(__lowerCAmelCase ).any()} and `inf`: {torch.isinf(__lowerCAmelCase )}.'''
) , )
recursive_check(__lowerCAmelCase , __lowerCAmelCase )
for model_class in self.all_model_classes:
__UpperCAmelCase = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
__UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
__UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase )
check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , {"output_hidden_states": True} )
__UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
__UpperCAmelCase = self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
check_equivalence(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , {"output_hidden_states": True} )
@require_torch
class UpperCAmelCase__ ( unittest.TestCase , snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
lowerCAmelCase__ : List[Any] = MaskFormerSwinConfig
def _UpperCAmelCase ( self: Optional[Any] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
__UpperCAmelCase = backbone_class(__lowerCAmelCase )
backbone.to(__lowerCAmelCase )
backbone.eval()
__UpperCAmelCase = backbone(**__lowerCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __lowerCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__UpperCAmelCase = backbone(**__lowerCAmelCase , output_hidden_states=__lowerCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__UpperCAmelCase = backbone(**__lowerCAmelCase , output_attentions=__lowerCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 286 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ['image_processor', 'tokenizer']
lowerCAmelCase__ : Any = 'CLIPImageProcessor'
lowerCAmelCase__ : Any = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self: str , __lowerCAmelCase: List[Any]=None , __lowerCAmelCase: Any=None , **__lowerCAmelCase: str ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowerCAmelCase , )
__UpperCAmelCase = kwargs.pop("feature_extractor" )
__UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self: Optional[int] , __lowerCAmelCase: Union[str, Any]=None , __lowerCAmelCase: Dict=None , __lowerCAmelCase: Optional[int]=None , **__lowerCAmelCase: Optional[int] ) -> Dict:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__UpperCAmelCase = self.tokenizer(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if images is not None:
__UpperCAmelCase = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase )
if text is not None and images is not None:
__UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowerCAmelCase ) , tensor_type=__lowerCAmelCase )
def _UpperCAmelCase ( self: str , *__lowerCAmelCase: str , **__lowerCAmelCase: List[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def _UpperCAmelCase ( self: Any , *__lowerCAmelCase: List[Any] , **__lowerCAmelCase: Dict ) -> Union[str, Any]:
'''simple docstring'''
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def _UpperCAmelCase ( self: Tuple ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer.model_input_names
__UpperCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCAmelCase ( self: Optional[int] ) -> str:
'''simple docstring'''
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowerCAmelCase , )
return self.image_processor_class
@property
def _UpperCAmelCase ( self: List[Any] ) -> List[str]:
'''simple docstring'''
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowerCAmelCase , )
return self.image_processor
| 286 | 1 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __UpperCAmelCase ( yaml.SafeLoader ):
def UpperCAmelCase ( self : Union[str, Any] , a_ : int ) -> List[str]:
'''simple docstring'''
a__ : Any = [self.constructed_objects[key_node] for key_node, _ in node.value]
a__ : Optional[Any] = [tuple(a_ ) if isinstance(a_ , a_ ) else key for key in keys]
a__ : str = Counter(a_ )
a__ : Optional[int] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" )
def UpperCAmelCase ( self : str , a_ : List[Any] , a_ : Optional[Any]=False ) -> int:
'''simple docstring'''
a__ : Tuple = super().construct_mapping(a_ , deep=a_ )
self._check_no_duplicates_on_constructed_node(a_ )
return mapping
def lowercase__ ( lowerCAmelCase__ : str ) -> List[Any]:
'''simple docstring'''
a__ : List[str] = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
a__ : str = full_content[1:].index("---" ) + 1
a__ : Optional[Any] = '''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(__snake_case )
class __UpperCAmelCase ( A__ ):
__lowerCamelCase : List[Any] = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def UpperCAmelCase ( cls : Tuple , a_ : Path ) -> Tuple:
'''simple docstring'''
with open(a_ , encoding="utf-8" ) as readme_file:
a__ : Optional[int] = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(a_ )
else:
return cls()
def UpperCAmelCase ( self : Dict , a_ : Path ) -> Union[str, Any]:
'''simple docstring'''
if path.exists():
with open(a_ , encoding="utf-8" ) as readme_file:
a__ : List[str] = readme_file.read()
else:
a__ : Optional[Any] = None
a__ : List[Any] = self._to_readme(a_ )
with open(a_ , "w" , encoding="utf-8" ) as readme_file:
readme_file.write(a_ )
def UpperCAmelCase ( self : str , a_ : Optional[str] = None ) -> Optional[int]:
'''simple docstring'''
if readme_content is not None:
a__ : str = _split_yaml_from_readme(a_ )
a__ : int = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
else:
a__ : Any = '''---\n''' + self.to_yaml_string() + '''---\n'''
return full_content
@classmethod
def UpperCAmelCase ( cls : Dict , a_ : str ) -> List[str]:
'''simple docstring'''
a__ : Union[str, Any] = yaml.load(a_ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
a__ : Tuple = {
(key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**a_ )
def UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
return yaml.safe_dump(
{
(key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=a_ , allow_unicode=a_ , encoding="utf-8" , ).decode("utf-8" )
__UpperCAmelCase = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__UpperCAmelCase = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
__UpperCAmelCase = ap.parse_args()
__UpperCAmelCase = Path(args.readme_filepath)
__UpperCAmelCase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath) | 642 |
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class _snake_case ( A__ ):
'''simple docstring'''
UpperCamelCase__ =(CMStochasticIterativeScheduler,)
UpperCamelCase__ =10
def snake_case_ ( self : Tuple , **snake_case : Any ):
UpperCAmelCase_ :Tuple = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**snake_case )
return config
def snake_case_ ( self : int ):
UpperCAmelCase_ :List[Any] = 10
UpperCAmelCase_ :Union[str, Any] = self.get_scheduler_config()
UpperCAmelCase_ :Any = self.scheduler_classes[0](**snake_case )
scheduler.set_timesteps(snake_case )
UpperCAmelCase_ :Union[str, Any] = scheduler.timesteps[0]
UpperCAmelCase_ :int = scheduler.timesteps[1]
UpperCAmelCase_ :Dict = self.dummy_sample
UpperCAmelCase_ :Dict = 0.1 * sample
UpperCAmelCase_ :List[str] = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
UpperCAmelCase_ :List[str] = scheduler.step(snake_case , snake_case , snake_case ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def snake_case_ ( self : Union[str, Any] ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=snake_case )
def snake_case_ ( self : Dict ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=snake_case )
def snake_case_ ( self : Any ):
UpperCAmelCase_ :Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase_ :List[str] = self.get_scheduler_config()
UpperCAmelCase_ :Any = scheduler_class(**snake_case )
UpperCAmelCase_ :Tuple = 1
scheduler.set_timesteps(snake_case )
UpperCAmelCase_ :Union[str, Any] = scheduler.timesteps
UpperCAmelCase_ :Tuple = torch.manual_seed(0 )
UpperCAmelCase_ :Dict = self.dummy_model()
UpperCAmelCase_ :List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case ):
# 1. scale model input
UpperCAmelCase_ :int = scheduler.scale_model_input(snake_case , snake_case )
# 2. predict noise residual
UpperCAmelCase_ :Optional[int] = model(snake_case , snake_case )
# 3. predict previous sample x_t-1
UpperCAmelCase_ :List[str] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
UpperCAmelCase_ :Dict = pred_prev_sample
UpperCAmelCase_ :Any = torch.sum(torch.abs(snake_case ) )
UpperCAmelCase_ :Tuple = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ :List[Any] = self.scheduler_classes[0]
UpperCAmelCase_ :Tuple = self.get_scheduler_config()
UpperCAmelCase_ :Dict = scheduler_class(**snake_case )
UpperCAmelCase_ :Union[str, Any] = [106, 0]
scheduler.set_timesteps(timesteps=snake_case )
UpperCAmelCase_ :Union[str, Any] = scheduler.timesteps
UpperCAmelCase_ :Dict = torch.manual_seed(0 )
UpperCAmelCase_ :Optional[Any] = self.dummy_model()
UpperCAmelCase_ :int = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCAmelCase_ :str = scheduler.scale_model_input(snake_case , snake_case )
# 2. predict noise residual
UpperCAmelCase_ :Tuple = model(snake_case , snake_case )
# 3. predict previous sample x_t-1
UpperCAmelCase_ :Optional[int] = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
UpperCAmelCase_ :int = pred_prev_sample
UpperCAmelCase_ :List[str] = torch.sum(torch.abs(snake_case ) )
UpperCAmelCase_ :int = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ :Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase_ :int = self.get_scheduler_config()
UpperCAmelCase_ :str = scheduler_class(**snake_case )
UpperCAmelCase_ :int = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=snake_case )
def snake_case_ ( self : List[str] ):
UpperCAmelCase_ :Union[str, Any] = self.scheduler_classes[0]
UpperCAmelCase_ :List[str] = self.get_scheduler_config()
UpperCAmelCase_ :List[Any] = scheduler_class(**snake_case )
UpperCAmelCase_ :Dict = [39, 30, 12, 1, 0]
UpperCAmelCase_ :str = len(snake_case )
with self.assertRaises(snake_case , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=snake_case , timesteps=snake_case )
def snake_case_ ( self : Any ):
UpperCAmelCase_ :Dict = self.scheduler_classes[0]
UpperCAmelCase_ :int = self.get_scheduler_config()
UpperCAmelCase_ :int = scheduler_class(**snake_case )
UpperCAmelCase_ :Union[str, Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=snake_case )
| 608 | 0 |
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase="shi-labs/oneformer_demo" ):
with open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="dataset" ) , "r" ) as f:
_UpperCAmelCase : str = json.load(lowerCAmelCase__ )
_UpperCAmelCase : str = {}
_UpperCAmelCase : Dict = []
_UpperCAmelCase : Optional[Any] = []
for key, info in class_info.items():
_UpperCAmelCase : Union[str, Any] = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(lowerCAmelCase__ ) )
_UpperCAmelCase : Optional[Any] = thing_ids
_UpperCAmelCase : str = class_names
return metadata
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Any=7 , lowerCamelCase__ : int=3 , lowerCamelCase__ : Dict=30 , lowerCamelCase__ : Optional[Any]=4_00 , lowerCamelCase__ : Any=None , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : str=True , lowerCamelCase__ : Optional[Any]=[0.5, 0.5, 0.5] , lowerCamelCase__ : Tuple=[0.5, 0.5, 0.5] , lowerCamelCase__ : List[str]=10 , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Tuple=2_55 , lowerCamelCase__ : Tuple="shi-labs/oneformer_demo" , lowerCamelCase__ : Tuple="ade20k_panoptic.json" , lowerCamelCase__ : List[Any]=10 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : Optional[Any] = min_resolution
_UpperCAmelCase : int = max_resolution
_UpperCAmelCase : Optional[Any] = do_resize
_UpperCAmelCase : Union[str, Any] = {"shortest_edge": 32, "longest_edge": 13_33} if size is None else size
_UpperCAmelCase : Any = do_normalize
_UpperCAmelCase : Optional[Any] = image_mean
_UpperCAmelCase : Optional[Any] = image_std
_UpperCAmelCase : int = class_info_file
_UpperCAmelCase : str = prepare_metadata(snake_case__ , snake_case__ )
_UpperCAmelCase : List[str] = num_text
_UpperCAmelCase : Tuple = repo_path
# for the post_process_functions
_UpperCAmelCase : Union[str, Any] = 2
_UpperCAmelCase : int = 10
_UpperCAmelCase : Dict = 10
_UpperCAmelCase : Tuple = 3
_UpperCAmelCase : Optional[Any] = 4
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : List[Any] = do_reduce_labels
_UpperCAmelCase : List[str] = ignore_index
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any]=False ) ->List[Any]:
'''simple docstring'''
if not batched:
_UpperCAmelCase : Optional[int] = image_inputs[0]
if isinstance(snake_case__ , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = image.size
else:
_UpperCAmelCase , _UpperCAmelCase : Dict = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase : Union[str, Any] = int(self.size["shortest_edge"] * h / w )
_UpperCAmelCase : List[str] = self.size["shortest_edge"]
elif w > h:
_UpperCAmelCase : Tuple = self.size["shortest_edge"]
_UpperCAmelCase : str = int(self.size["shortest_edge"] * w / h )
else:
_UpperCAmelCase : Any = self.size["shortest_edge"]
_UpperCAmelCase : List[Any] = self.size["shortest_edge"]
else:
_UpperCAmelCase : List[Any] = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_UpperCAmelCase : str = max(snake_case__ , key=lambda lowerCamelCase__ : item[0] )[0]
_UpperCAmelCase : Optional[Any] = max(snake_case__ , key=lambda lowerCamelCase__ : item[1] )[1]
return expected_height, expected_width
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __a , unittest.TestCase ):
lowerCAmelCase : List[str] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
lowerCAmelCase : List[str] = image_processing_class
def lowerCAmelCase__ ( self : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = OneFormerImageProcessorTester(self )
@property
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "ignore_index" ) )
self.assertTrue(hasattr(snake_case__ , "class_info_file" ) )
self.assertTrue(hasattr(snake_case__ , "num_text" ) )
self.assertTrue(hasattr(snake_case__ , "repo_path" ) )
self.assertTrue(hasattr(snake_case__ , "metadata" ) )
self.assertTrue(hasattr(snake_case__ , "do_reduce_labels" ) )
def lowerCAmelCase__ ( self : str ) ->int:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
_UpperCAmelCase : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : int = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
_UpperCAmelCase : List[str] = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
_UpperCAmelCase : str = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : Any = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
_UpperCAmelCase : List[Any] = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Union[str, Any]="np" ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_UpperCAmelCase : Tuple = self.image_processing_tester.num_labels
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ )
if with_segmentation_maps:
_UpperCAmelCase : Optional[Any] = num_labels
if is_instance_map:
_UpperCAmelCase : Optional[int] = list(range(snake_case__ ) ) * 2
_UpperCAmelCase : int = dict(enumerate(snake_case__ ) )
_UpperCAmelCase : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_UpperCAmelCase : str = [Image.fromarray(snake_case__ ) for annotation in annotations]
_UpperCAmelCase : Optional[Any] = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , snake_case__ , return_tensors="pt" , instance_id_to_semantic_id=snake_case__ , pad_and_return_pixel_mask=snake_case__ , )
return inputs
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
def common(lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Dict=None ):
_UpperCAmelCase : Union[str, Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case__ , is_instance_map=snake_case__ , segmentation_type=snake_case__ )
_UpperCAmelCase : List[Any] = inputs["mask_labels"]
_UpperCAmelCase : Optional[Any] = inputs["class_labels"]
_UpperCAmelCase : str = inputs["pixel_values"]
_UpperCAmelCase : Optional[Any] = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case__ , snake_case__ , snake_case__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case__ )
common(is_instance_map=snake_case__ , segmentation_type="pil" )
common(is_instance_map=snake_case__ , segmentation_type="pil" )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = np.zeros((20, 50) )
_UpperCAmelCase : Dict = 1
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Union[str, Any] = 1
_UpperCAmelCase : str = binary_mask_to_rle(snake_case__ )
self.assertEqual(len(snake_case__ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
_UpperCAmelCase : Any = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase : Tuple = fature_extractor.post_process_semantic_segmentation(snake_case__ )
self.assertEqual(len(snake_case__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_UpperCAmelCase : str = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
_UpperCAmelCase : Any = fature_extractor.post_process_semantic_segmentation(snake_case__ , target_sizes=snake_case__ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
_UpperCAmelCase : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase : Optional[int] = image_processor.post_process_instance_segmentation(snake_case__ , threshold=0 )
self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
_UpperCAmelCase : str = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase : Any = image_processor.post_process_panoptic_segmentation(snake_case__ , threshold=0 )
self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
| 705 |
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCamelCase__ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase : Tuple = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase : Optional[int] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase : Union[str, Any] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __lowerCAmelCase )
# ffn -> feed_forward
_UpperCAmelCase : Dict = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
_UpperCAmelCase : int = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
_UpperCAmelCase : Union[str, Any] = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(".time_mix_r" ):
_UpperCAmelCase : int = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase : List[str] = "rwkv." + name
_UpperCAmelCase : Optional[Any] = weight
return state_dict
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
# 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict
print(
"Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 40 | 0 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
SCREAMING_SNAKE_CASE__ = 2_5_6_0_4_7
SCREAMING_SNAKE_CASE__ = 2_5_6_1_4_5
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
A__ : int = NllbTokenizer
A__ : Optional[int] = NllbTokenizerFast
A__ : Optional[int] = True
A__ : Tuple = True
A__ : Optional[int] = {}
def _a ( self : Any ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
A__ = NllbTokenizer(_snake_case , keep_accents=_snake_case )
tokenizer.save_pretrained(self.tmpdirname )
def _a ( self : Optional[int] ):
"""simple docstring"""
A__ = NllbTokenizer(_snake_case , keep_accents=_snake_case )
A__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(_snake_case , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_snake_case ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
A__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A__ = tokenizer.convert_tokens_to_ids(_snake_case )
self.assertListEqual(
_snake_case , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
A__ = tokenizer.convert_ids_to_tokens(_snake_case )
self.assertListEqual(
_snake_case , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
def _a ( self : Any ):
"""simple docstring"""
A__ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = self.rust_tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = self.tokenizer_class.from_pretrained(_snake_case , **_snake_case )
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(_snake_case )
A__ = tokenizer_p.save_pretrained(_snake_case )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
A__ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(_snake_case , _snake_case )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(_snake_case )
A__ = tokenizer_p.from_pretrained(_snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_snake_case , _snake_case ) )
shutil.rmtree(_snake_case )
# Save tokenizer rust, legacy_format=True
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(_snake_case , legacy_format=_snake_case )
A__ = tokenizer_p.save_pretrained(_snake_case )
# Checks it save with the same files
self.assertSequenceEqual(_snake_case , _snake_case )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(_snake_case )
A__ = tokenizer_p.from_pretrained(_snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_snake_case , _snake_case ) )
shutil.rmtree(_snake_case )
# Save tokenizer rust, legacy_format=False
A__ = tempfile.mkdtemp()
A__ = tokenizer_r.save_pretrained(_snake_case , legacy_format=_snake_case )
A__ = tokenizer_p.save_pretrained(_snake_case )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
A__ = tokenizer_r.from_pretrained(_snake_case )
A__ = tokenizer_p.from_pretrained(_snake_case )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_snake_case , _snake_case ) )
shutil.rmtree(_snake_case )
@require_torch
def _a ( self : str ):
"""simple docstring"""
if not self.test_seqaseq:
return
A__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Longer text that will definitely require truncation.
A__ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
A__ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
A__ = tokenizer.prepare_seqaseq_batch(
src_texts=_snake_case , tgt_texts=_snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='ron_Latn' , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 10 )
# max_target_length will default to max_length if not specified
A__ = tokenizer.prepare_seqaseq_batch(
_snake_case , tgt_texts=_snake_case , max_length=3 , return_tensors='pt' )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.labels.shape[1] , 3 )
A__ = tokenizer.prepare_seqaseq_batch(
src_texts=_snake_case , max_length=3 , max_target_length=10 , return_tensors='pt' )
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 )
self.assertNotIn('decoder_input_ids' , _snake_case )
@unittest.skip('Unfortunately way too slow to build a BPE with SentencePiece.' )
def _a ( self : Dict ):
"""simple docstring"""
pass
def _a ( self : List[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
A__ = [AddedToken('<special>' , lstrip=_snake_case )]
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , **_snake_case )
A__ = tokenizer_r.encode('Hey this is a <special> token' )
A__ = tokenizer_r.encode('<special>' , add_special_tokens=_snake_case )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
A__ = self.rust_tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , **_snake_case , )
A__ = self.tokenizer_class.from_pretrained(
_snake_case , additional_special_tokens=_snake_case , **_snake_case )
A__ = tokenizer_p.encode('Hey this is a <special> token' )
A__ = tokenizer_cr.encode('Hey this is a <special> token' )
self.assertEqual(_snake_case , _snake_case )
self.assertEqual(_snake_case , _snake_case )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : int = "facebook/nllb-200-distilled-600M"
A__ : List[Any] = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
A__ : Dict = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
A__ : List[Any] = [
25_60_47,
1_62_97,
13_44_08,
81_65,
24_80_66,
1_47_34,
9_50,
11_35,
10_57_21,
35_73,
83,
2_73_52,
1_08,
4_94_86,
2,
]
@classmethod
def _a ( cls : Tuple ):
"""simple docstring"""
A__ = NllbTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='eng_Latn' , tgt_lang='ron_Latn' )
A__ = 1
return cls
def _a ( self : Optional[Any] ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Arab'] , 25_60_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ace_Latn'] , 25_60_02 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['fra_Latn'] , 25_60_57 )
def _a ( self : str ):
"""simple docstring"""
A__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
def _a ( self : Tuple ):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids )
# fmt: off
A__ = [RO_CODE, 42_54, 9_80_68, 11_29_23, 3_90_72, 39_09, 7_13, 10_27_67, 26, 1_73_14, 3_56_42, 1_46_83, 3_31_18, 20_22, 6_69_87, 2, 25_60_47]
# fmt: on
A__ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case )
A__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case )
self.assertEqual(_snake_case , _snake_case )
self.assertNotIn(self.tokenizer.eos_token , _snake_case )
def _a ( self : Dict ):
"""simple docstring"""
A__ = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , _snake_case )
A__ = 10
A__ = self.tokenizer(_snake_case , max_length=_snake_case , truncation=_snake_case ).input_ids[0]
self.assertEqual(ids[-1] , 2 )
self.assertEqual(ids[0] , _snake_case )
self.assertEqual(len(_snake_case ) , _snake_case )
def _a ( self : str ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [25_62_03, 3] )
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
A__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_snake_case )
A__ = NllbTokenizer.from_pretrained(_snake_case )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _snake_case )
@require_torch
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_snake_case , truncation=_snake_case , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
A__ = shift_tokens_right(
batch['labels'] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id['ron_Latn'] )
self.assertIsInstance(_snake_case , _snake_case )
self.assertEqual((2, 15) , batch.input_ids.shape )
self.assertEqual((2, 15) , batch.attention_mask.shape )
A__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _snake_case )
self.assertEqual(_snake_case , batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _a ( self : List[Any] ):
"""simple docstring"""
A__ = self.tokenizer(self.src_text , padding=_snake_case , truncation=_snake_case , max_length=3 , return_tensors='pt' )
A__ = self.tokenizer(
text_target=self.tgt_text , padding=_snake_case , truncation=_snake_case , max_length=10 , return_tensors='pt' )
A__ = targets['input_ids']
A__ = shift_tokens_right(
_snake_case , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _a ( self : str ):
"""simple docstring"""
A__ = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
nested_simplify(_snake_case ) , {
# A, test, EOS, en_XX
'input_ids': [[25_60_47, 70, 73_56, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 25_60_57,
} , )
@require_torch
def _a ( self : List[str] ):
"""simple docstring"""
A__ = True
A__ = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2, 25_60_47] )
A__ = False
A__ = self.tokenizer(
'UN Chief says there is no military solution in Syria' , src_lang='eng_Latn' , tgt_lang='fra_Latn' )
self.assertEqual(
inputs.input_ids , [25_60_47, 1_62_97, 13_44_08, 2_56_53, 63_70, 2_48, 2_54, 10_39_29, 9_49_95, 1_08, 4_94_86, 2] )
| 9 |
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A :
def __init__( self , lowerCamelCase__ , lowerCamelCase__=3 , lowerCamelCase__=32 , lowerCamelCase__=3 , lowerCamelCase__=10 , lowerCamelCase__=[10, 20, 30, 40] , lowerCamelCase__=[1, 1, 2, 1] , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__="relu" , lowerCamelCase__=3 , lowerCamelCase__=None , ) -> List[str]:
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = embeddings_size
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_act
lowercase__ = num_labels
lowercase__ = scope
lowercase__ = len(lowerCamelCase__ )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Any:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = RegNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def A__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = RegNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowercase__ = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Optional[Any] = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
lowerCamelCase : Any = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : str = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : str = False
lowerCamelCase : Union[str, Any] = False
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = RegNetModelTester(self )
lowercase__ = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Any:
'''simple docstring'''
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def A__ ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def A__ ( self ) -> str:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase__ )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
if isinstance(lowerCamelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def A__ ( self ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
lowercase__ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
lowercase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase__ = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase__ = layer_type
lowercase__ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = RegNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _A ( ):
lowercase__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def A__ ( self ) -> Dict:
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase__ , return_tensors="""pt""" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase__ )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
lowercase__ = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 325 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def lowerCAmelCase (__A):
"""simple docstring"""
_a = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_a = [144, 192, 240]
_a = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_a = [96, 120, 144]
_a = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_a = [64, 80, 96]
_a = [16, 16, 24, 48, 64, 80, 320]
_a = 0.05
_a = 2.0
if mobilevit_name.startswith('''deeplabv3_'''):
_a = 512
_a = 16
_a = 21
_a = '''pascal-voc-id2label.json'''
else:
_a = 1_000
_a = '''imagenet-1k-id2label.json'''
_a = '''huggingface/label-files'''
_a = json.load(open(hf_hub_download(__A , __A , repo_type='''dataset''') , '''r'''))
_a = {int(__A): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
return config
def lowerCAmelCase (__A , __A=False):
"""simple docstring"""
for i in range(1 , 6):
if F'''layer_{i}.''' in name:
_a = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''')
if "conv_1." in name:
_a = name.replace('''conv_1.''' , '''conv_stem.''')
if ".block." in name:
_a = name.replace('''.block.''' , '''.''')
if "exp_1x1" in name:
_a = name.replace('''exp_1x1''' , '''expand_1x1''')
if "red_1x1" in name:
_a = name.replace('''red_1x1''' , '''reduce_1x1''')
if ".local_rep.conv_3x3." in name:
_a = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''')
if ".local_rep.conv_1x1." in name:
_a = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''')
if ".norm." in name:
_a = name.replace('''.norm.''' , '''.normalization.''')
if ".conv." in name:
_a = name.replace('''.conv.''' , '''.convolution.''')
if ".conv_proj." in name:
_a = name.replace('''.conv_proj.''' , '''.conv_projection.''')
for i in range(0 , 2):
for j in range(0 , 4):
if F'''.{i}.{j}.''' in name:
_a = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''')
for i in range(2 , 6):
for j in range(0 , 4):
if F'''.{i}.{j}.''' in name:
_a = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''')
if "expand_1x1" in name:
_a = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''')
if "conv_3x3" in name:
_a = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''')
if "reduce_1x1" in name:
_a = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''')
for i in range(2 , 5):
if F'''.global_rep.{i}.weight''' in name:
_a = name.replace(F'''.global_rep.{i}.weight''' , '''.layernorm.weight''')
if F'''.global_rep.{i}.bias''' in name:
_a = name.replace(F'''.global_rep.{i}.bias''' , '''.layernorm.bias''')
if ".global_rep." in name:
_a = name.replace('''.global_rep.''' , '''.transformer.''')
if ".pre_norm_mha.0." in name:
_a = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''')
if ".pre_norm_mha.1.out_proj." in name:
_a = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''')
if ".pre_norm_ffn.0." in name:
_a = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''')
if ".pre_norm_ffn.1." in name:
_a = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''')
if ".pre_norm_ffn.4." in name:
_a = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''')
if ".transformer." in name:
_a = name.replace('''.transformer.''' , '''.transformer.layer.''')
if ".aspp_layer." in name:
_a = name.replace('''.aspp_layer.''' , '''.''')
if ".aspp_pool." in name:
_a = name.replace('''.aspp_pool.''' , '''.''')
if "seg_head." in name:
_a = name.replace('''seg_head.''' , '''segmentation_head.''')
if "segmentation_head.classifier.classifier." in name:
_a = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''')
if "classifier.fc." in name:
_a = name.replace('''classifier.fc.''' , '''classifier.''')
elif (not base_model) and ("segmentation_head." not in name):
_a = '''mobilevit.''' + name
return name
def lowerCAmelCase (__A , __A , __A=False):
"""simple docstring"""
if base_model:
_a = ''''''
else:
_a = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
_a = orig_state_dict.pop(__A)
if key[:8] == "encoder.":
_a = key[8:]
if "qkv" in key:
_a = key.split('''.''')
_a = int(key_split[0][6:]) - 1
_a = int(key_split[3])
_a = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''')
_a = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_a = (
F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
else:
_a = val
return orig_state_dict
def lowerCAmelCase ():
"""simple docstring"""
_a = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a = Image.open(requests.get(__A , stream=__A).raw)
return im
@torch.no_grad()
def lowerCAmelCase (__A , __A , __A , __A=False):
"""simple docstring"""
_a = get_mobilevit_config(__A)
# load original state_dict
_a = torch.load(__A , map_location='''cpu''')
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_'''):
_a = MobileViTForSemanticSegmentation(__A).eval()
else:
_a = MobileViTForImageClassification(__A).eval()
_a = convert_state_dict(__A , __A)
model.load_state_dict(__A)
# Check outputs on an image, prepared by MobileViTImageProcessor
_a = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
_a = image_processor(images=prepare_img() , return_tensors='''pt''')
_a = model(**__A)
_a = outputs.logits
if mobilevit_name.startswith('''deeplabv3_'''):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_a = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_a = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_a = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
])
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''')
assert torch.allclose(logits[0, :3, :3, :3] , __A , atol=1e-4)
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
_a = torch.tensor([-0.98_66, 0.23_92, -1.12_41])
elif mobilevit_name == "mobilevit_xs":
_a = torch.tensor([-2.47_61, -0.93_99, -1.95_87])
elif mobilevit_name == "mobilevit_xxs":
_a = torch.tensor([-1.93_64, -1.23_27, -0.46_53])
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''')
assert torch.allclose(logits[0, :3] , __A , atol=1e-4)
Path(__A).mkdir(exist_ok=__A)
print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''')
model.save_pretrained(__A)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(__A)
if push_to_hub:
_a = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''')
_a = model_mapping[mobilevit_name]
image_processor.push_to_hub(__A , organization='''apple''')
model.push_to_hub(__A , organization='''apple''')
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowercase_ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 352 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> Optional[int]:
"""simple docstring"""
_a = [[1, 2, 4], [1, 2, 3, 4]]
_a = DisjunctiveConstraint(A )
self.assertTrue(isinstance(dc.token_ids , A ) )
with self.assertRaises(A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def a__ (self ) -> Any:
"""simple docstring"""
_a = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(A ):
DisjunctiveConstraint(A ) # fails here
def a__ (self ) -> Dict:
"""simple docstring"""
_a = [[1, 2, 3], [1, 2, 4]]
_a = DisjunctiveConstraint(A )
_a , _a , _a = dc.update(1 )
_a = stepped is True and completed is False and reset is False
self.assertTrue(A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_a , _a , _a = dc.update(2 )
_a = stepped is True and completed is False and reset is False
self.assertTrue(A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_a , _a , _a = dc.update(3 )
_a = stepped is True and completed is True and reset is False
self.assertTrue(A )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
_a = DisjunctiveConstraint(A )
_a , _a , _a = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
_a , _a , _a = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
_a , _a , _a = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
_a , _a , _a = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
_a , _a , _a = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
_a , _a , _a = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
_a , _a , _a = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
| 352 | 1 |
'''simple docstring'''
from timeit import timeit
UpperCamelCase_ = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _lowerCAmelCase ( __magic_name__ : str ) -> bool:
lowercase : Tuple =0
lowercase : Union[str, Any] =len(__magic_name__ ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _lowerCAmelCase ( __magic_name__ : str ) -> bool:
lowercase : List[str] =len(__magic_name__ ) // 2
lowercase : List[str] =len(__magic_name__ )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(__magic_name__ ) )
def _lowerCAmelCase ( __magic_name__ : str ) -> bool:
if len(__magic_name__ ) <= 2:
return True
if s[0] == s[len(__magic_name__ ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _lowerCAmelCase ( __magic_name__ : str ) -> bool:
return s == s[::-1]
def _lowerCAmelCase ( __magic_name__ : str ) -> None:
lowercase : int =f'''all({name}(key) is value for key, value in test_data.items())'''
lowercase : Optional[Any] =f'''from __main__ import test_data, {name}'''
lowercase : int =500000
lowercase : List[str] =timeit(stmt=__magic_name__ , setup=__magic_name__ , number=__magic_name__ )
print(f'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 92 |
from collections.abc import Iterable
from typing import Generic, TypeVar
lowerCamelCase : Optional[Any] = TypeVar('_T')
class snake_case__ ( Generic[_T] ):
def __init__( self : Tuple , _lowerCamelCase : Iterable[_T] | None = None ):
snake_case__ : list[_T] = list(iterable or [] )
snake_case__ : list[_T] = []
def __len__( self : Tuple ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : List[Any] ):
return F'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def UpperCAmelCase__ ( self : Any , _lowerCamelCase : _T ):
self._stacka.append(_lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
snake_case__ : int = self._stacka.pop
snake_case__ : List[str] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('Queue is empty' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 170 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
lowercase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
return max(metric_fn(a__ , a__ ) for gt in ground_truths )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =[line.strip() for line in open(a__ , 'r' ).readlines()]
_lowerCAmelCase =[]
if args.gold_data_mode == "qa":
_lowerCAmelCase =pd.read_csv(a__ , sep='\t' , header=a__ )
for answer_list in data[1]:
_lowerCAmelCase =ast.literal_eval(a__ )
answers.append(a__ )
else:
_lowerCAmelCase =[line.strip() for line in open(a__ , 'r' ).readlines()]
_lowerCAmelCase =[[reference] for reference in references]
_lowerCAmelCase =_lowerCAmelCase =_lowerCAmelCase =0
for prediction, ground_truths in zip(a__ , a__ ):
total += 1
em += metric_max_over_ground_truths(a__ , a__ , a__ )
fa += metric_max_over_ground_truths(a__ , a__ , a__ )
_lowerCAmelCase =100.0 * em / total
_lowerCAmelCase =100.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
_lowerCAmelCase =args.k
_lowerCAmelCase =[line.strip() for line in open(a__ , 'r' ).readlines()]
_lowerCAmelCase =[line.strip() for line in open(a__ , 'r' ).readlines()]
_lowerCAmelCase =_lowerCAmelCase =0
for hypo, reference in zip(a__ , a__ ):
_lowerCAmelCase =set(hypo.split('\t' )[:k] )
_lowerCAmelCase =set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
_lowerCAmelCase =100.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
def strip_title(a__ ):
if title.startswith('"' ):
_lowerCAmelCase =title[1:]
if title.endswith('"' ):
_lowerCAmelCase =title[:-1]
return title
_lowerCAmelCase =rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a__ , return_tensors='pt' , padding=a__ , truncation=a__ , )['input_ids'].to(args.device )
_lowerCAmelCase =rag_model.rag.question_encoder(a__ )
_lowerCAmelCase =question_enc_outputs[0]
_lowerCAmelCase =rag_model.retriever(
a__ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
_lowerCAmelCase =rag_model.retriever.index.get_doc_dicts(result.doc_ids )
_lowerCAmelCase =[]
for docs in all_docs:
_lowerCAmelCase =[strip_title(a__ ) for title in docs['title']]
provenance_strings.append('\t'.join(a__ ) )
return provenance_strings
def UpperCamelCase__ ( a__ , a__ , a__ ):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase =rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
a__ , return_tensors='pt' , padding=a__ , truncation=a__ )
_lowerCAmelCase =inputs_dict.input_ids.to(args.device )
_lowerCAmelCase =inputs_dict.attention_mask.to(args.device )
_lowerCAmelCase =rag_model.generate( # rag_model overwrites generate
a__ , attention_mask=a__ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=a__ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
_lowerCAmelCase =rag_model.retriever.generator_tokenizer.batch_decode(a__ , skip_special_tokens=a__ )
if args.print_predictions:
for q, a in zip(a__ , a__ ):
logger.info('Q: {} - A: {}'.format(a__ , a__ ) )
return answers
def UpperCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase =argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=a__ , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=a__ , choices=['exact', 'compressed', 'legacy'] , type=a__ , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=a__ , type=a__ , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=a__ , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=a__ , type=a__ , required=a__ , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=a__ , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=a__ , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=a__ , type=a__ , required=a__ , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=a__ , type=a__ , required=a__ , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=a__ , choices=['qa', 'ans'] , help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) , )
parser.add_argument(
'--predictions_path' , type=a__ , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=a__ , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=a__ , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=a__ , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=5_0 , type=a__ , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retried while generating.' , )
_lowerCAmelCase =parser.parse_args()
_lowerCAmelCase =torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase ={}
if args.model_type is None:
_lowerCAmelCase =infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
_lowerCAmelCase =RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
_lowerCAmelCase =args.n_docs
if args.index_name is not None:
_lowerCAmelCase =args.index_name
if args.index_path is not None:
_lowerCAmelCase =args.index_path
else:
_lowerCAmelCase =BartForConditionalGeneration
_lowerCAmelCase =(
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , a__ )
_lowerCAmelCase =get_scores if args.eval_mode == 'e2e' else get_precision_at_k
_lowerCAmelCase =evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(a__ , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(a__ ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
_lowerCAmelCase =RagRetriever.from_pretrained(a__ , **a__ )
_lowerCAmelCase =model_class.from_pretrained(a__ , retriever=a__ , **a__ )
model.retriever.init_retrieval()
else:
_lowerCAmelCase =model_class.from_pretrained(a__ , **a__ )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
_lowerCAmelCase =[]
for line in tqdm(a__ ):
questions.append(line.strip() )
if len(a__ ) == args.eval_batch_size:
_lowerCAmelCase =evaluate_batch_fn(a__ , a__ , a__ )
preds_file.write('\n'.join(a__ ) + '\n' )
preds_file.flush()
_lowerCAmelCase =[]
if len(a__ ) > 0:
_lowerCAmelCase =evaluate_batch_fn(a__ , a__ , a__ )
preds_file.write('\n'.join(a__ ) )
preds_file.flush()
score_fn(a__ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
lowercase_ = get_args()
main(args)
| 710 | '''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def UpperCamelCase__ ( a__ ):
'''simple docstring'''
_lowerCAmelCase =os.path.join(args.tf_model_dir , 'parameters.json' )
_lowerCAmelCase =json.loads(open(a__ ).read() )
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith('.pt' ):
_lowerCAmelCase =args.output + '.pt'
_lowerCAmelCase =OrderedDict()
with tf.device('/CPU:0' ):
_lowerCAmelCase =tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase =reader.get_tensor(a__ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
_lowerCAmelCase =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
_lowerCAmelCase =8
_lowerCAmelCase ='model.sqout.%d.weight' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/moe' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/softmlp/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
_lowerCAmelCase =key_name[-9:-7]
for i in range(1_6 ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
_lowerCAmelCase =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/mlp' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p1/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/kernel' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/p2/bias' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/ln' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.feed_forward.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/att' ):
_lowerCAmelCase =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
_lowerCAmelCase =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase =state[:, 0, :, :]
_lowerCAmelCase =state[:, 1, :, :]
_lowerCAmelCase =state[:, 2, :, :]
_lowerCAmelCase =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/o/kernel' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
_lowerCAmelCase =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/an' ):
_lowerCAmelCase =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.bias' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.endswith('/g' ):
_lowerCAmelCase ='model.blocks.%d.self_attn.norm.weight' % player
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
_lowerCAmelCase ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
_lowerCAmelCase ='model.%s.weight' % nlayer
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
if key_name.startswith('model/wte' ):
_lowerCAmelCase ='lm_head.weight'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =torch.tensor(a__ )
elif key_name.startswith('model/wob' ):
_lowerCAmelCase ='final_logits_bias'
_lowerCAmelCase =vnp.copy() # same in embedded
_lowerCAmelCase =state.reshape((1, -1) )
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense/kernel":
_lowerCAmelCase ='model.last_project.weight'
_lowerCAmelCase =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase =torch.tensor(a__ )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase ='model.last_project.bias'
_lowerCAmelCase =vnp.copy() # same because it is one dimensional
_lowerCAmelCase =torch.tensor(a__ )
torch.save(a__ , args.output )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 58 | 0 |
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE_ ( __A : Tuple ) -> str:
_SCREAMING_SNAKE_CASE = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__A , __A )
def SCREAMING_SNAKE_CASE_ ( __A : Optional[Any] ) -> int:
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = emb.weight.shape
_SCREAMING_SNAKE_CASE = nn.Linear(__A , __A , bias=__A )
_SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE_ ( __A : List[str] ) -> int:
_SCREAMING_SNAKE_CASE = torch.load(__A , map_location="cpu" )
_SCREAMING_SNAKE_CASE = mam_aaa["args"] or mam_aaa["cfg"]["model"]
_SCREAMING_SNAKE_CASE = mam_aaa["model"]
remove_ignore_keys_(__A )
_SCREAMING_SNAKE_CASE = state_dict["encoder.embed_tokens.weight"].shape[0]
_SCREAMING_SNAKE_CASE = MaMaaaConfig(
vocab_size=__A , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
_SCREAMING_SNAKE_CASE = state_dict["decoder.embed_tokens.weight"]
_SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(__A )
model.model.load_state_dict(__A , strict=__A )
_SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
| 418 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def SCREAMING_SNAKE_CASE_ ( __A : int = 1_50_00_00 ) -> int:
_SCREAMING_SNAKE_CASE = defaultdict(__A )
_SCREAMING_SNAKE_CASE = 2
while 2 * euclid_m * (euclid_m + 1) <= limit:
for euclid_n in range((euclid_m % 2) + 1 , __A , 2 ):
if gcd(__A , __A ) > 1:
continue
_SCREAMING_SNAKE_CASE = 2 * euclid_m * (euclid_m + euclid_n)
for perimeter in range(__A , limit + 1 , __A ):
frequencies[perimeter] += 1
euclid_m += 1
return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 418 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : List[str] = logging.get_logger(__name__)
_UpperCamelCase : Union[str, Any] = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _lowerCAmelCase( _a):
"""simple docstring"""
lowerCamelCase__ = '''decision_transformer'''
lowerCamelCase__ = ['''past_key_values''']
lowerCamelCase__ = {
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , UpperCAmelCase=17 , UpperCAmelCase=4 , UpperCAmelCase=1_28 , UpperCAmelCase=40_96 , UpperCAmelCase=True , UpperCAmelCase=1 , UpperCAmelCase=10_24 , UpperCAmelCase=3 , UpperCAmelCase=1 , UpperCAmelCase=None , UpperCAmelCase="relu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=1e-5 , UpperCAmelCase=0.02 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=5_02_56 , UpperCAmelCase=5_02_56 , UpperCAmelCase=False , UpperCAmelCase=False , **UpperCAmelCase , )-> Optional[Any]:
__A = state_dim
__A = act_dim
__A = hidden_size
__A = max_ep_len
__A = action_tanh
__A = vocab_size
__A = n_positions
__A = n_layer
__A = n_head
__A = n_inner
__A = activation_function
__A = resid_pdrop
__A = embd_pdrop
__A = attn_pdrop
__A = layer_norm_epsilon
__A = initializer_range
__A = scale_attn_weights
__A = use_cache
__A = scale_attn_by_inverse_layer_idx
__A = reorder_and_upcast_attn
__A = bos_token_id
__A = eos_token_id
super().__init__(bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase )
| 703 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _lowerCAmelCase:
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=14 , UpperCAmelCase=7 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=99 , UpperCAmelCase=32 , UpperCAmelCase=5 , UpperCAmelCase=4 , UpperCAmelCase=37 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_12 , UpperCAmelCase=16 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=3 , UpperCAmelCase=4 , UpperCAmelCase=None , )-> Dict:
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_token_type_ids
__A = use_input_mask
__A = use_labels
__A = use_mc_token_ids
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
__A = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self )-> Optional[int]:
__A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__A = None
if self.use_mc_token_ids:
__A = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A = ids_tensor([self.batch_size] , self.num_choices )
__A = self.get_config()
__A = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self )-> List[str]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase )-> Tuple:
__A = CTRLModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
model(UpperCAmelCase , token_type_ids=UpperCAmelCase , head_mask=UpperCAmelCase )
model(UpperCAmelCase , token_type_ids=UpperCAmelCase )
__A = model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase )-> Optional[Any]:
__A = CTRLLMHeadModel(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__A = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self )-> Optional[int]:
__A = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) = config_and_inputs
__A = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , *UpperCAmelCase )-> Optional[int]:
__A = self.num_labels
__A = CTRLForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
__A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A = model(UpperCAmelCase , token_type_ids=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _lowerCAmelCase( _a , _a , _a , unittest.TestCase):
"""simple docstring"""
lowerCamelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCamelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
lowerCamelCase__ = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
def SCREAMING_SNAKE_CASE__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )-> Dict:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self )-> str:
__A = CTRLModelTester(self )
__A = ConfigTester(self , config_class=UpperCAmelCase , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self )-> Union[str, Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self )-> List[str]:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self )-> List[Any]:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self )-> Dict:
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCAmelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self )-> Optional[int]:
pass
@slow
def SCREAMING_SNAKE_CASE__ ( self )-> Optional[Any]:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = CTRLModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def SCREAMING_SNAKE_CASE__ ( self )-> Union[str, Any]:
pass
@require_torch
class _lowerCAmelCase( unittest.TestCase):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self )-> Dict:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def SCREAMING_SNAKE_CASE__ ( self )-> List[Any]:
__A = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(UpperCAmelCase )
__A = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCAmelCase ) # Legal the president is
__A = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__A = model.generate(UpperCAmelCase , do_sample=UpperCAmelCase )
self.assertListEqual(output_ids[0].tolist() , UpperCAmelCase )
| 341 | 0 |
"""simple docstring"""
def __magic_name__ ( __snake_case : Union[str, Any] ) -> list:
if bit_count < 0:
raise ValueError("The given input must be positive" )
# get the generated string sequence
lowercase : Optional[int] = gray_code_sequence_string(a_ )
#
# convert them to integers
for i in range(len(a_ ) ):
lowercase : List[Any] = int(sequence[i] , 2 )
return sequence
def __magic_name__ ( __snake_case : Optional[Any] ) -> list:
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
lowercase : Tuple = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
lowercase : List[str] = gray_code_sequence_string(bit_count - 1 )
lowercase : List[Any] = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
lowercase : Any = "0" + smaller_sequence[i]
sequence.append(a_ )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
lowercase : Optional[Any] = "1" + smaller_sequence[i]
sequence.append(a_ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCAmelCase ( a_ = "isbn/0140328726" ) -> dict:
"""simple docstring"""
__A = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
__A = F'''{olid} is not a valid Open Library olid'''
raise ValueError(a_ )
return requests.get(F'''https://openlibrary.org/{new_olid}.json''' ).json()
def UpperCAmelCase ( a_ ) -> dict:
"""simple docstring"""
__A = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
__A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
__A = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
__A = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(a_ , a_ ):
__A = ", ".join(a_ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
SCREAMING_SNAKE_CASE :int = input('\nEnter the ISBN code to search (or \'quit\' to stop): ').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(f'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(f'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
SCREAMING_SNAKE_CASE :Any = summarize_book(get_openlibrary_data(f'''isbn/{isbn}'''))
print('\n'.join(f'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(f'''Sorry, there are no results for ISBN: {isbn}.''')
| 55 | 0 |
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class A__ ( unittest.TestCase):
"""simple docstring"""
@slow
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
lowerCamelCase : int = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowerCamelCase : Any = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
lowerCamelCase : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
lowerCamelCase : Any = shift_tokens_right(lowercase_ , model.config.pad_token_id , model.config.decoder_start_token_id )
lowerCamelCase : int = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
lowerCamelCase : List[Any] = optax.softmax_cross_entropy(lowercase_ , onehot(lowercase_ , logits.shape[-1] ) ).mean()
lowerCamelCase : Any = -(labels.shape[-1] * loss.item())
lowerCamelCase : str = -84.91_27
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 715 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
| 42 | 0 |
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
lowercase__ : Union[str, Any] = get_logger(__name__)
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any]=None ) -> Any:
'''simple docstring'''
_UpperCamelCase = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith('''__''' ):
setattr(self , lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCamelCase = module._original_module if isinstance(lowerCAmelCase__ , _PatchedModuleObj ) else module
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : Union[str, Any] = []
def __init__( self : int , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any]=None ) -> Any:
'''simple docstring'''
_UpperCamelCase = obj
_UpperCamelCase = target
_UpperCamelCase = new
_UpperCamelCase = target.split('''.''' )[0]
_UpperCamelCase = {}
_UpperCamelCase = attrs or []
def __enter__( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
*_UpperCamelCase , _UpperCamelCase = self.target.split('''.''' )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(lowerCAmelCase__ ) ):
try:
_UpperCamelCase = import_module('''.'''.join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
_UpperCamelCase = getattr(self.obj , lowerCAmelCase__ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(lowerCAmelCase__ , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
_UpperCamelCase = obj_attr
# patch at top level
setattr(self.obj , lowerCAmelCase__ , _PatchedModuleObj(lowerCAmelCase__ , attrs=self.attrs ) )
_UpperCamelCase = getattr(self.obj , lowerCAmelCase__ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(lowerCAmelCase__ , lowerCAmelCase__ , _PatchedModuleObj(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , attrs=self.attrs ) )
_UpperCamelCase = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
# finally set the target attribute
setattr(lowerCAmelCase__ , lowerCAmelCase__ , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
_UpperCamelCase = getattr(import_module('''.'''.join(lowerCAmelCase__ ) ) , lowerCAmelCase__ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , lowerCAmelCase__ ) is attr_value:
_UpperCamelCase = getattr(self.obj , lowerCAmelCase__ )
setattr(self.obj , lowerCAmelCase__ , self.new )
elif target_attr in globals()["__builtins__"]: # if it'a s builtin like "open"
_UpperCamelCase = globals()['''__builtins__'''][target_attr]
setattr(self.obj , lowerCAmelCase__ , self.new )
else:
raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" )
def __exit__( self : Tuple , *lowerCAmelCase__ : Optional[Any] ) -> int:
'''simple docstring'''
for attr in list(self.original ):
setattr(self.obj , lowerCAmelCase__ , self.original.pop(lowerCAmelCase__ ) )
def snake_case__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
self.__enter__()
self._active_patches.append(self )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
| 98 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
lowercase__ : Dict = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['memory_attention', 'encoder_attn'],
['attention', 'attn'],
['/', '.'],
['.LayerNorm.gamma', '_layer_norm.weight'],
['.LayerNorm.beta', '_layer_norm.bias'],
['r.layer_', 'r.layers.'],
['output_proj', 'out_proj'],
['ffn.dense_1.', 'fc2.'],
['ffn.dense.', 'fc1.'],
['ffn_layer_norm', 'final_layer_norm'],
['kernel', 'weight'],
['encoder_layer_norm.', 'encoder.layer_norm.'],
['decoder_layer_norm.', 'decoder.layer_norm.'],
['embeddings.weights', 'shared.weight'],
]
def a__ ( lowercase : List[str] ) -> List[Any]:
"""simple docstring"""
for pegasus_name, hf_name in PATTERNS:
_UpperCamelCase = k.replace(lowercase, lowercase )
return k
def a__ ( lowercase : dict, lowercase : dict ) -> PegasusForConditionalGeneration:
"""simple docstring"""
_UpperCamelCase = DEFAULTS.copy()
cfg_kwargs.update(lowercase )
_UpperCamelCase = PegasusConfig(**lowercase )
_UpperCamelCase = PegasusForConditionalGeneration(lowercase )
_UpperCamelCase = torch_model.model.state_dict()
_UpperCamelCase = {}
for k, v in tf_weights.items():
_UpperCamelCase = rename_state_dict_key(lowercase )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
_UpperCamelCase = v.T
_UpperCamelCase = torch.tensor(lowercase, dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
_UpperCamelCase = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
_UpperCamelCase = mapping['''shared.weight''']
_UpperCamelCase = mapping['''shared.weight''']
_UpperCamelCase = {k: torch.zeros_like(lowercase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**lowercase )
_UpperCamelCase , _UpperCamelCase = torch_model.model.load_state_dict(lowercase, strict=lowercase )
_UpperCamelCase = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def a__ ( lowercase : List[Any]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
"""simple docstring"""
_UpperCamelCase = tf.train.list_variables(lowercase )
_UpperCamelCase = {}
_UpperCamelCase = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(lowercase, desc='''converting tf checkpoint to dict''' ):
_UpperCamelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
_UpperCamelCase = tf.train.load_variable(lowercase, lowercase )
_UpperCamelCase = array
return tf_weights
def a__ ( lowercase : str, lowercase : str ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = Path(lowercase ).parent.name
_UpperCamelCase = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
_UpperCamelCase = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''', model_max_length=lowercase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(lowercase )
# convert model
_UpperCamelCase = get_tf_weights_as_numpy(lowercase )
_UpperCamelCase = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
_UpperCamelCase = task_specific_params
_UpperCamelCase = convert_pegasus(lowercase, lowercase )
torch_model.save_pretrained(lowercase )
_UpperCamelCase = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(lowercase, Path(lowercase ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
lowercase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
lowercase__ : Optional[int] = parser.parse_args()
if args.save_dir is None:
lowercase__ : Tuple = Path(args.tf_ckpt_path).parent.name
lowercase__ : Optional[Any] = os.path.join('pegasus', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 98 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _lowerCamelCase :
@staticmethod
def __lowerCamelCase ( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
pass
def __lowercase( __snake_case : Dict ) -> Optional[int]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
lowerCamelCase_ : List[str] = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class _lowerCamelCase (unittest.TestCase ):
lowercase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = pipeline(
'document-question-answering' , model=A_ , tokenizer=A_ , image_processor=A_ )
__snake_case = INVOICE_URL
__snake_case = list(zip(*apply_tesseract(load_image(A_ ) , A_ , '' ) ) )
__snake_case = "What is the placebo?"
__snake_case = [
{
"image": load_image(A_ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__snake_case = dqa_pipeline(A_ , top_k=2 )
self.assertEqual(
A_ , [
[
{'score': ANY(A_ ), 'answer': ANY(A_ ), 'start': ANY(A_ ), 'end': ANY(A_ )},
{'score': ANY(A_ ), 'answer': ANY(A_ ), 'start': ANY(A_ ), 'end': ANY(A_ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCamelCase ( self ):
__snake_case = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
__snake_case = INVOICE_URL
__snake_case = "How many cats are there?"
__snake_case = [
{"score": 0.0_0_0_1, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_0_0_1, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
__snake_case = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(nested_simplify(A_ , decimals=4 ) , A_ )
__snake_case = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(nested_simplify(A_ , decimals=4 ) , A_ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
__snake_case = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__snake_case = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(A_ , [] )
# We can optionnally pass directly the words and bounding boxes
__snake_case = "./tests/fixtures/tests_samples/COCO/000000039769.png"
__snake_case = []
__snake_case = []
__snake_case = dqa_pipeline(image=A_ , question=A_ , words=A_ , boxes=A_ , top_k=2 )
self.assertEqual(A_ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCamelCase ( self ):
__snake_case = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
__snake_case = INVOICE_URL
__snake_case = "What is the invoice number?"
__snake_case = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__snake_case = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__snake_case = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def __lowerCamelCase ( self ):
__snake_case = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = "What is the invoice number?"
__snake_case = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__snake_case = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__snake_case = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowerCamelCase ( self ):
__snake_case = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=A_ )
__snake_case = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=A_ , revision='3dc6de3' , )
__snake_case = INVOICE_URL
__snake_case = "What is the invoice number?"
__snake_case = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
__snake_case = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
__snake_case = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(A_ ) , A_ , '' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def __lowerCamelCase ( self ):
__snake_case = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=A_ )
__snake_case = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=A_ , revision='3dc6de3' , max_seq_len=50 , )
__snake_case = INVOICE_URL
__snake_case = "What is the invoice number?"
__snake_case = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
__snake_case = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
[
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
__snake_case = list(zip(*apply_tesseract(load_image(A_ ) , A_ , '' ) ) )
# This model should also work if `image` is set to None
__snake_case = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(A_ , decimals=4 ) , [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def __lowerCamelCase ( self ):
__snake_case = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
__snake_case = INVOICE_URL
__snake_case = "What is the invoice number?"
__snake_case = dqa_pipeline(image=A_ , question=A_ , top_k=2 )
self.assertEqual(nested_simplify(A_ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def __lowerCamelCase ( self ):
pass
| 716 |
from manim import *
class _lowerCamelCase (lowerCamelCase ):
def __lowerCamelCase ( self ):
__snake_case = Rectangle(height=0.5 , width=0.5 )
__snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__snake_case = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__snake_case = VGroup(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__snake_case = Text('CPU' , font_size=24 )
__snake_case = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(SCREAMING_SNAKE_CASE_ )
__snake_case = [mem.copy() for i in range(1 )]
__snake_case = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__snake_case = Text('GPU' , font_size=24 )
__snake_case = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
gpu.align_to(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
gpu.set_x(gpu.get_x() - 1 )
self.add(SCREAMING_SNAKE_CASE_ )
__snake_case = [mem.copy() for i in range(6 )]
__snake_case = VGroup(*SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0 )
__snake_case = Text('Model' , font_size=24 )
__snake_case = Group(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).arrange(SCREAMING_SNAKE_CASE_ , buff=0.5 , aligned_edge=SCREAMING_SNAKE_CASE_ )
model.move_to([3, -1.0, 0] )
self.play(
Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) , Create(SCREAMING_SNAKE_CASE_ , run_time=1 ) , )
__snake_case = MarkupText(
f'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' , font_size=24 , )
__snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__snake_case = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(SCREAMING_SNAKE_CASE_ , run_time=2.5 ) , Write(SCREAMING_SNAKE_CASE_ ) , Write(SCREAMING_SNAKE_CASE_ ) )
self.add(SCREAMING_SNAKE_CASE_ )
__snake_case = []
__snake_case = []
__snake_case = []
for i, rect in enumerate(SCREAMING_SNAKE_CASE_ ):
__snake_case = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(SCREAMING_SNAKE_CASE_ , opacity=0.7 )
cpu_target.move_to(SCREAMING_SNAKE_CASE_ )
cpu_target.generate_target()
__snake_case = 0.4_6 / 4
__snake_case = 0.4_6 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=SCREAMING_SNAKE_CASE_ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target , direction=SCREAMING_SNAKE_CASE_ , buff=0.0 )
cpu_targs.append(SCREAMING_SNAKE_CASE_ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(SCREAMING_SNAKE_CASE_ ) )
second_animations.append(MoveToTarget(SCREAMING_SNAKE_CASE_ , run_time=1.5 ) )
self.play(*SCREAMING_SNAKE_CASE_ )
self.play(*SCREAMING_SNAKE_CASE_ )
self.wait()
| 345 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 75 | """simple docstring"""
import os
def lowerCAmelCase_ () -> List[str]:
a_ : List[Any] = os.path.join(os.path.dirname(_SCREAMING_SNAKE_CASE ) , "num.txt" )
with open(_SCREAMING_SNAKE_CASE ) as file_hand:
return str(sum(int(_SCREAMING_SNAKE_CASE ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 473 | 0 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _snake_case ( A_ : Optional[Any] ):
"""simple docstring"""
a_ : Any = os.path.join(args.tf_model_dir , """parameters.json""" )
a_ : Tuple = json.loads(open(A_ ).read() )
if not params:
raise ValueError(
f'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''' )
if not args.output.endswith(""".pt""" ):
a_ : Dict = args.output + """.pt"""
a_ : Optional[Any] = OrderedDict()
with tf.device("""/CPU:0""" ):
a_ : Tuple = tf.train.load_checkpoint(args.tf_model_dir )
a_ : Union[str, Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
a_ : int = reader.get_tensor(A_ ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
a_ : List[Any] = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
a_ : Optional[Any] = 8
a_ : List[Any] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
a_ : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ : Tuple = torch.tensor(A_ )
elif key_name.startswith("""model/moe""" ):
a_ : Union[str, Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
a_ : Any = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
a_ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ : Optional[int] = torch.tensor(A_ )
elif key_name.endswith("""/softmlp/kernel""" ):
a_ : Any = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
a_ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ : Optional[Any] = torch.tensor(A_ )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
a_ : Optional[Any] = key_name[-9:-7]
for i in range(16 ):
a_ : Optional[int] = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
a_ : Any = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
a_ : Optional[Any] = torch.tensor(A_ )
elif key_name.startswith("""model/mlp""" ):
a_ : Union[str, Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
a_ : List[Any] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
a_ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ : Dict = torch.tensor(A_ )
elif key_name.endswith("""/p1/bias""" ):
a_ : Optional[int] = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
a_ : Optional[Any] = vnp.copy() # same because it is one dimensional
a_ : Union[str, Any] = torch.tensor(A_ )
elif key_name.endswith("""/p2/kernel""" ):
a_ : Optional[Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
a_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ : Optional[Any] = torch.tensor(A_ )
elif key_name.endswith("""/p2/bias""" ):
a_ : int = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
a_ : Tuple = vnp.copy() # same because it is one dimensional
a_ : List[str] = torch.tensor(A_ )
elif key_name.startswith("""model/ln""" ):
a_ : str = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
a_ : str = """model.blocks.%d.feed_forward.norm.bias""" % player
a_ : Any = vnp.copy() # same because it is one dimensional
a_ : Any = torch.tensor(A_ )
elif key_name.endswith("""/g""" ):
a_ : List[str] = """model.blocks.%d.feed_forward.norm.weight""" % player
a_ : Union[str, Any] = vnp.copy() # same because it is one dimensional
a_ : Any = torch.tensor(A_ )
elif key_name.startswith("""model/att""" ):
a_ : Union[str, Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
a_ : int = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
a_ : Dict = state[:, 0, :, :]
a_ : Any = state[:, 1, :, :]
a_ : Union[str, Any] = state[:, 2, :, :]
a_ : str = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a_ : Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a_ : int = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a_ : Any = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
a_ : Tuple = torch.tensor(A_ )
a_ : Union[str, Any] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
a_ : Optional[Any] = torch.tensor(A_ )
a_ : Tuple = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
a_ : Tuple = torch.tensor(A_ )
elif key_name.endswith("""/o/kernel""" ):
a_ : Optional[Any] = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
a_ : Optional[Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
a_ : Dict = torch.tensor(A_ )
elif key_name.startswith("""model/an""" ):
a_ : str = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
a_ : Dict = """model.blocks.%d.self_attn.norm.bias""" % player
a_ : Tuple = vnp.copy() # same because it is one dimensional
a_ : Tuple = torch.tensor(A_ )
elif key_name.endswith("""/g""" ):
a_ : List[Any] = """model.blocks.%d.self_attn.norm.weight""" % player
a_ : Union[str, Any] = vnp.copy() # same because it is one dimensional
a_ : Optional[Any] = torch.tensor(A_ )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
a_ : Any = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
a_ : int = """model.%s.weight""" % nlayer
a_ : Optional[Any] = vnp.copy() # same in embedded
a_ : str = torch.tensor(A_ )
if key_name.startswith("""model/wte""" ):
a_ : Any = """lm_head.weight"""
a_ : Any = vnp.copy() # same in embedded
a_ : Union[str, Any] = torch.tensor(A_ )
elif key_name.startswith("""model/wob""" ):
a_ : List[Any] = """final_logits_bias"""
a_ : Optional[int] = vnp.copy() # same in embedded
a_ : Any = state.reshape((1, -1) )
a_ : Dict = torch.tensor(A_ )
elif key_name == "model/dense/kernel":
a_ : Optional[int] = """model.last_project.weight"""
a_ : Optional[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ : Tuple = torch.tensor(A_ )
elif key_name == "model/dense_1/bias":
a_ : Tuple = """model.last_project.bias"""
a_ : List[str] = vnp.copy() # same because it is one dimensional
a_ : Union[str, Any] = torch.tensor(A_ )
torch.save(A_ , args.output )
if __name__ == "__main__":
__snake_case: Any = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
__snake_case: Tuple = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 460 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case: List[str] = logging.get_logger(__name__)
__snake_case: Dict = "https://openaipublic.azureedge.net/jukebox/models/"
__snake_case: List[str] = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def _snake_case ( A_ : str ):
"""simple docstring"""
if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10:
a_ : Optional[Any] = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" )
elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10:
a_ : Tuple = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" )
elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10:
a_ : Union[str, Any] = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" )
elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10:
a_ : List[str] = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" )
if "conditioner_blocks.0." in key:
a_ : str = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" )
if "prime_prior" in key:
a_ : Any = key.replace("""prime_prior""" , """encoder""" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
a_ : Optional[Any] = key.replace(""".emb.""" , """.""" )
if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(""".k""" , """.codebook""" )
if "y_emb." in key:
return key.replace("""y_emb.""" , """metadata_embedding.""" )
if "x_emb.emb." in key:
a_ : Optional[Any] = key.replace("""0.x_emb.emb""" , """embed_tokens""" )
if "prime_state_ln" in key:
return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" )
if ".ln" in key:
return key.replace(""".ln""" , """.layer_norm""" )
if "_ln" in key:
return key.replace("""_ln""" , """_layer_norm""" )
if "prime_state_proj" in key:
return key.replace("""prime_state_proj""" , """encoder.proj_in""" )
if "prime_x_out" in key:
return key.replace("""prime_x_out""" , """encoder.lm_head""" )
if "prior.x_out" in key:
return key.replace("""x_out""" , """fc_proj_out""" )
if "x_emb" in key:
return key.replace("""x_emb""" , """embed_tokens""" )
return key
def _snake_case ( A_ : str , A_ : int , A_ : str , A_ : List[Any] ):
"""simple docstring"""
a_ : List[str] = {}
import re
a_ : Optional[Any] = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
a_ : int = re.compile(
R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
a_ : Optional[Any] = re.compile(R"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
a_ : int = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" )
a_ : List[str] = re.compile(
R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
a_ : List[Any] = re.compile(R"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" )
a_ : Tuple = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" )
a_ : Dict = re.compile(
R"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" )
a_ : Any = re.compile(R"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A_ ):
a_ : Any = re_encoder_block_conv_in.match(A_ )
a_ : str = regex_match.groups()
a_ : Tuple = int(groups[2] ) * 2 + int(groups[3] )
a_ : Optional[Any] = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'''
a_ : Any = re_encoder_block_conv_in.sub(A_ , A_ )
elif re_encoder_block_resnet.fullmatch(A_ ):
a_ : Tuple = re_encoder_block_resnet.match(A_ )
a_ : Optional[Any] = regex_match.groups()
a_ : str = int(groups[2] ) * 2 + int(groups[3] )
a_ : str = {"""1""": 1, """3""": 2}[groups[-2]]
a_ : str = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'''
a_ : List[Any] = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
a_ : List[str] = prefix + resnet_block
a_ : Optional[Any] = re_encoder_block_resnet.sub(A_ , A_ )
elif re_encoder_block_proj_out.fullmatch(A_ ):
a_ : Tuple = re_encoder_block_proj_out.match(A_ )
a_ : List[str] = regex_match.groups()
a_ : int = f'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'''
a_ : Optional[Any] = re_encoder_block_proj_out.sub(A_ , A_ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A_ ):
a_ : Union[str, Any] = re_decoder_block_conv_out.match(A_ )
a_ : Union[str, Any] = regex_match.groups()
a_ : List[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
a_ : Optional[int] = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'''
a_ : int = re_decoder_block_conv_out.sub(A_ , A_ )
elif re_decoder_block_resnet.fullmatch(A_ ):
a_ : Tuple = re_decoder_block_resnet.match(A_ )
a_ : str = regex_match.groups()
a_ : List[str] = int(groups[2] ) * 2 + int(groups[3] ) - 2
a_ : Optional[Any] = {"""1""": 1, """3""": 2}[groups[-2]]
a_ : Any = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'''
a_ : int = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
a_ : Optional[Any] = prefix + resnet_block
a_ : Any = re_decoder_block_resnet.sub(A_ , A_ )
elif re_decoder_block_proj_in.fullmatch(A_ ):
a_ : Tuple = re_decoder_block_proj_in.match(A_ )
a_ : Any = regex_match.groups()
a_ : Dict = f'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'''
a_ : Tuple = re_decoder_block_proj_in.sub(A_ , A_ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A_ ):
a_ : int = re_prior_cond_conv_out.match(A_ )
a_ : Dict = regex_match.groups()
a_ : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
a_ : Any = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'''
a_ : str = re_prior_cond_conv_out.sub(A_ , A_ )
elif re_prior_cond_resnet.fullmatch(A_ ):
a_ : List[str] = re_prior_cond_resnet.match(A_ )
a_ : List[str] = regex_match.groups()
a_ : List[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
a_ : Tuple = {"""1""": 1, """3""": 2}[groups[-2]]
a_ : Optional[Any] = f'''conditioner_blocks.upsampler.upsample_block.{block_index}.'''
a_ : Union[str, Any] = f'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'''
a_ : List[str] = prefix + resnet_block
a_ : Optional[Any] = re_prior_cond_resnet.sub(A_ , A_ )
elif re_prior_cond_proj_in.fullmatch(A_ ):
a_ : List[Any] = re_prior_cond_proj_in.match(A_ )
a_ : int = regex_match.groups()
a_ : Any = f'''conditioner_blocks.upsampler.proj_in.{groups[-1]}'''
a_ : Union[str, Any] = re_prior_cond_proj_in.sub(A_ , A_ )
# keep original key
else:
a_ : str = original_key
a_ : Any = replace_key(A_ )
if f'''{key_prefix}.{key}''' not in model_state_dict or key is None:
print(f'''failed converting {original_key} to {key}, does not match''' )
# handle missmatched shape
elif value.shape != model_state_dict[f'''{key_prefix}.{key}'''].shape:
a_ : Tuple = model_state_dict[f'''{key_prefix}.{key}''']
print(f'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' )
a_ : Optional[Any] = original_key
a_ : Tuple = original_key
a_ : Union[str, Any] = value
return new_dict
@torch.no_grad()
def _snake_case ( A_ : Dict=None , A_ : Optional[Any]=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ):
a_ : Any = requests.get(f'''{PREFIX}{file}''' , allow_redirects=A_ )
os.makedirs(f'''{pytorch_dump_folder_path}/''' , exist_ok=A_ )
open(f'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , """wb""" ).write(r.content )
a_ : List[Any] = MODEL_MAPPING[model_name.split("""/""" )[-1]]
a_ : Optional[Any] = JukeboxConfig.from_pretrained(A_ )
a_ : List[Any] = JukeboxModel(A_ )
a_ : Optional[Any] = []
a_ : Optional[Any] = {}
for i, dict_name in enumerate(A_ ):
a_ : int = torch.load(f'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )["""model"""]
a_ : Optional[int] = {}
for k in old_dic.keys():
if k.endswith(""".b""" ):
a_ : int = old_dic[k]
elif k.endswith(""".w""" ):
a_ : Dict = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
a_ : Dict = old_dic[k]
else:
a_ : Optional[int] = old_dic[k]
a_ : List[Any] = """vqvae""" if i == 0 else f'''priors.{3 - i}'''
a_ : Any = fix_jukebox_keys(A_ , model.state_dict() , A_ , A_ )
weight_dict.append(A_ )
a_ : str = weight_dict.pop(0 )
model.vqvae.load_state_dict(A_ )
for i in range(len(A_ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(A_ ).mkdir(exist_ok=A_ )
with open(f'''{pytorch_dump_folder_path}/mapping.json''' , """w""" ) as txtfile:
json.dump(A_ , A_ )
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
return weight_dict
if __name__ == "__main__":
__snake_case: Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
__snake_case: Any = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 460 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
_lowercase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def A (__lowerCamelCase :Optional[int] ):
_lowerCAmelCase = {}
with open(__lowerCamelCase , """r""" ) as file:
for line_number, line in enumerate(__lowerCamelCase ):
_lowerCAmelCase = line.strip()
if line:
_lowerCAmelCase = line.split()
_lowerCAmelCase = line_number
_lowerCAmelCase = words[0]
_lowerCAmelCase = value
return result
def A (__lowerCamelCase :Optional[Any] , __lowerCamelCase :Any , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any] , __lowerCamelCase :List[str] ):
for attribute in key.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = shape_pointer.shape
# let's reduce dimension
_lowerCAmelCase = value[0]
else:
_lowerCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}' )
if weight_type == "weight":
_lowerCAmelCase = value
elif weight_type == "weight_g":
_lowerCAmelCase = value
elif weight_type == "weight_v":
_lowerCAmelCase = value
elif weight_type == "bias":
_lowerCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
_lowerCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = value
else:
_lowerCAmelCase = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def A (__lowerCamelCase :Union[str, Any] , __lowerCamelCase :Tuple , __lowerCamelCase :Dict , __lowerCamelCase :List[Any] , __lowerCamelCase :int ):
_lowerCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
_lowerCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
_lowerCAmelCase = """param"""
if weight_type is not None and weight_type != "param":
_lowerCAmelCase = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
_lowerCAmelCase = """.""".join([key, hf_param_name] )
else:
_lowerCAmelCase = key
_lowerCAmelCase = value if """lm_head""" in full_key else value[0]
_lowercase = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def A (__lowerCamelCase :Any , __lowerCamelCase :int , __lowerCamelCase :List[str]=None , __lowerCamelCase :List[Any]=None ):
_lowerCAmelCase = False
for key, mapped_key in MAPPING.items():
_lowerCAmelCase = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
_lowerCAmelCase = True
if "*" in mapped_key:
_lowerCAmelCase = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
_lowerCAmelCase = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase = """weight_v"""
elif "bias" in name:
_lowerCAmelCase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase = """weight"""
else:
_lowerCAmelCase = None
if hf_dict is not None:
rename_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return is_used
return is_used
def A (__lowerCamelCase :Any , __lowerCamelCase :Dict , __lowerCamelCase :Dict ):
_lowerCAmelCase = []
_lowerCAmelCase = fairseq_model.state_dict()
_lowerCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
_lowerCAmelCase = True
else:
_lowerCAmelCase = load_wavaveca_layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def A (__lowerCamelCase :Tuple , __lowerCamelCase :Optional[int] , __lowerCamelCase :Any , __lowerCamelCase :List[Any] , __lowerCamelCase :List[Any] ):
_lowerCAmelCase = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase = name.split(""".""" )
_lowerCAmelCase = int(items[0] )
_lowerCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
_lowerCAmelCase = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def A (__lowerCamelCase :List[str] , __lowerCamelCase :Tuple , __lowerCamelCase :List[Any]=None , __lowerCamelCase :Union[str, Any]=None , __lowerCamelCase :str=True , __lowerCamelCase :str=False ):
if config_path is not None:
_lowerCAmelCase = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaConfig()
if is_seq_class:
_lowerCAmelCase = read_txt_into_dict(__lowerCamelCase )
_lowerCAmelCase = idalabel
_lowerCAmelCase = WavaVecaForSequenceClassification(__lowerCamelCase )
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
_lowerCAmelCase = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase = target_dict.pad_index
_lowerCAmelCase = target_dict.bos_index
_lowerCAmelCase = target_dict.eos_index
_lowerCAmelCase = len(target_dict.symbols )
_lowerCAmelCase = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
_lowerCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase = 0
_lowerCAmelCase = 1
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
_lowerCAmelCase = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
_lowerCAmelCase = True if config.feat_extract_norm == """layer""" else False
_lowerCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
_lowerCAmelCase = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
_lowerCAmelCase = WavaVecaForCTC(__lowerCamelCase )
else:
_lowerCAmelCase = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase = argparse.Namespace(task="""audio_pretraining""" )
_lowerCAmelCase = fairseq.tasks.setup_task(__lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
_lowerCAmelCase = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_lowercase = parser.parse_args()
_lowercase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 5 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Tuple = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 112 | 0 |
import copy
import re
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : Tuple = 'hp'
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : Tuple = None
@classmethod
def A__ ( cls , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = prefix
UpperCAmelCase_ = defaults
cls.build_naming_info()
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
if len(__lowerCAmelCase ) == 0:
return ""
UpperCAmelCase_ = None
if any(char.isdigit() for char in word ):
raise Exception(f'''Parameters should not contain numbers: \'{word}\' contains a number''' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(__lowerCAmelCase ) + 1 ):
UpperCAmelCase_ = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase ):
UpperCAmelCase_ = ""
while integer != 0:
UpperCAmelCase_ = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
UpperCAmelCase_ = 0
while True:
UpperCAmelCase_ = word + "#" + int_to_alphabetic(__lowerCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
UpperCAmelCase_ = sword
break
UpperCAmelCase_ = short_word
UpperCAmelCase_ = word
return short_word
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = param_name.split("_" )
UpperCAmelCase_ = [TrialShortNamer.shortname_for_word(__lowerCAmelCase , __lowerCAmelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
UpperCAmelCase_ = ["", "_"]
for separator in separators:
UpperCAmelCase_ = separator.join(__lowerCAmelCase )
if shortname not in info["reverse_short_param"]:
UpperCAmelCase_ = shortname
UpperCAmelCase_ = param_name
return shortname
return param_name
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = TrialShortNamer.shortname_for_key(__lowerCAmelCase , __lowerCAmelCase )
UpperCAmelCase_ = short_name
UpperCAmelCase_ = param_name
@classmethod
def A__ ( cls ):
if cls.NAMING_INFO is not None:
return
UpperCAmelCase_ = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
UpperCAmelCase_ = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(__lowerCAmelCase , __lowerCAmelCase )
UpperCAmelCase_ = info
@classmethod
def A__ ( cls , lowerCAmelCase ):
cls.build_naming_info()
assert cls.PREFIX is not None
UpperCAmelCase_ = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f'''You should provide a default value for the param name {k} with value {v}''' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
UpperCAmelCase_ = cls.NAMING_INFO["short_param"][k]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
UpperCAmelCase_ = 1 if v else 0
UpperCAmelCase_ = "" if isinstance(__lowerCAmelCase , (int, float) ) else "-"
UpperCAmelCase_ = f'''{key}{sep}{v}'''
name.append(__lowerCAmelCase )
return "_".join(__lowerCAmelCase )
@classmethod
def A__ ( cls , lowerCAmelCase ):
UpperCAmelCase_ = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
UpperCAmelCase_ = []
else:
UpperCAmelCase_ = repr.split("_" )
UpperCAmelCase_ = {}
for value in values:
if "-" in value:
UpperCAmelCase_ , UpperCAmelCase_ = value.split("-" )
else:
UpperCAmelCase_ = re.sub("[0-9.]" , "" , __lowerCAmelCase )
UpperCAmelCase_ = float(re.sub("[^0-9.]" , "" , __lowerCAmelCase ) )
UpperCAmelCase_ = cls.NAMING_INFO["reverse_short_param"][p_k]
UpperCAmelCase_ = p_v
for k in cls.DEFAULTS:
if k not in parameters:
UpperCAmelCase_ = cls.DEFAULTS[k]
return parameters
| 709 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = [1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 0, 0, 0
UpperCAmelCase_ = ugly_nums[ia] * 2
UpperCAmelCase_ = ugly_nums[ia] * 3
UpperCAmelCase_ = ugly_nums[ia] * 5
for _ in range(1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
ugly_nums.append(__SCREAMING_SNAKE_CASE )
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 23 | 0 |
import baseaa
def _A ( SCREAMING_SNAKE_CASE__ : str ):
return baseaa.aaaencode(string.encode('''utf-8''' ) )
def _A ( SCREAMING_SNAKE_CASE__ : bytes ):
return baseaa.aaadecode(SCREAMING_SNAKE_CASE__ ).decode('''utf-8''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 |
from __future__ import annotations
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
UpperCamelCase :list[list[int]] = []
UpperCamelCase :list[int] = []
UpperCamelCase :List[str] = 0
UpperCamelCase :Any = sum(SCREAMING_SNAKE_CASE__ )
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return result
def _A ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : int , ):
if sum(SCREAMING_SNAKE_CASE__ ) > max_sum or (remaining_nums_sum + sum(SCREAMING_SNAKE_CASE__ )) < max_sum:
return
if sum(SCREAMING_SNAKE_CASE__ ) == max_sum:
result.append(SCREAMING_SNAKE_CASE__ )
return
for index in range(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) ):
create_state_space_tree(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 , [*path, nums[index]] , SCREAMING_SNAKE_CASE__ , remaining_nums_sum - nums[index] , )
__snake_case = [3, 34, 4, 12, 5, 2]
__snake_case = 9
__snake_case = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 658 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCAmelCase )
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
UpperCAmelCase : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
UpperCAmelCase : ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
UpperCAmelCase : str = "text"
UpperCAmelCase : str = "summary"
@property
def lowerCAmelCase_ ( self : List[str] ):
return {self.text_column: "text", self.summary_column: "summary"}
| 709 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
a = '''\
@inproceedings{lin-2004-rouge,
title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",
author = "Lin, Chin-Yew",
booktitle = "Text Summarization Branches Out",
month = jul,
year = "2004",
address = "Barcelona, Spain",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W04-1013",
pages = "74--81",
}
'''
a = '''\
ROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for
evaluating automatic summarization and machine translation software in natural language processing.
The metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.
Note that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.
This metrics is a wrapper around Google Research reimplementation of ROUGE:
https://github.com/google-research/google-research/tree/master/rouge
'''
a = '''
Calculates average rouge scores for a list of hypotheses and references
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
rouge_types: A list of rouge types to calculate.
Valid names:
`"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,
`"rougeL"`: Longest common subsequence based scoring.
`"rougeLSum"`: rougeLsum splits text using `"\n"`.
See details in https://github.com/huggingface/datasets/issues/617
use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.
use_aggregator: Return aggregates if this is set to True
Returns:
rouge1: rouge_1 (precision, recall, f1),
rouge2: rouge_2 (precision, recall, f1),
rougeL: rouge_l (precision, recall, f1),
rougeLsum: rouge_lsum (precision, recall, f1)
Examples:
>>> rouge = datasets.load_metric(\'rouge\')
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> results = rouge.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
[\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']
>>> print(results["rouge1"])
AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))
>>> print(results["rouge1"].mid.fmeasure)
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase_ ( datasets.Metric ):
'''simple docstring'''
def lowerCAmelCase_ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/google-research/google-research/tree/master/rouge'] , reference_urls=[
'https://en.wikipedia.org/wiki/ROUGE_(metric)',
'https://github.com/google-research/google-research/tree/master/rouge',
] , )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Dict=False ):
if rouge_types is None:
_A = ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']
_A = rouge_scorer.RougeScorer(rouge_types=_UpperCAmelCase , use_stemmer=_UpperCAmelCase )
if use_aggregator:
_A = scoring.BootstrapAggregator()
else:
_A = []
for ref, pred in zip(_UpperCAmelCase , _UpperCAmelCase ):
_A = scorer.score(_UpperCAmelCase , _UpperCAmelCase )
if use_aggregator:
aggregator.add_scores(_UpperCAmelCase )
else:
scores.append(_UpperCAmelCase )
if use_aggregator:
_A = aggregator.aggregate()
else:
_A = {}
for key in scores[0]:
_A = [score[key] for score in scores]
return result
| 505 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
snake_case_ : Dict = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
snake_case_ : Optional[int] = BASE_URL + '''/user'''
# https://github.com/settings/tokens
snake_case_ : Optional[Any] = os.environ.get('''USER_TOKEN''', '''''')
def lowercase__( _UpperCamelCase : str )-> dict[Any, Any]:
"""simple docstring"""
_UpperCamelCase = {
"Authorization": f"token {auth_token}",
"Accept": "application/vnd.github.v3+json",
}
return requests.get(_UpperCamelCase , headers=_UpperCamelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(F"""{key}: {value}""")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 138 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
snake_case_ : Union[str, Any] = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def lowercase__( _UpperCamelCase : str = "mumbai" )-> Generator[tuple[str, str], None, None]:
"""simple docstring"""
_UpperCamelCase = BeautifulSoup(requests.get(url + location ).content , "html.parser" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("div" , attrs={"data-tn-component": "organicJob"} ):
_UpperCamelCase = job.find("a" , attrs={"data-tn-element": "jobTitle"} ).text.strip()
_UpperCamelCase = job.find("span" , {"class": "company"} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F"""Job {i:>2} is {job[0]} at {job[1]}""")
| 138 | 1 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = 0.0 , _UpperCamelCase = 1.0 ):
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class _UpperCAmelCase ( snake_case ):
__lowerCamelCase: Tuple = 'linear'
__lowerCamelCase: Any = 'cosine'
__lowerCamelCase: Optional[Any] = 'cosine_with_restarts'
__lowerCamelCase: Tuple = 'polynomial'
__lowerCamelCase: int = 'constant'
__lowerCamelCase: Optional[Any] = 'constant_with_warmup'
__lowerCamelCase: List[str] = 'piecewise_constant'
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
return LambdaLR(_UpperCamelCase , lambda _UpperCamelCase : 1 , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1.0 , _UpperCamelCase ) )
return 1.0
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = -1 ):
"""simple docstring"""
lowercase_ : List[Any] = {}
lowercase_ : Dict = step_rules.split("," )
for rule_str in rule_list[:-1]:
lowercase_ , lowercase_ : Any = rule_str.split(":" )
lowercase_ : List[Any] = int(_UpperCamelCase )
lowercase_ : int = float(_UpperCamelCase )
lowercase_ : Optional[int] = value
lowercase_ : Union[str, Any] = float(rule_list[-1] )
def create_rules_function(_UpperCamelCase , _UpperCamelCase ):
def rule_func(_UpperCamelCase ) -> float:
lowercase_ : Optional[Any] = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_UpperCamelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowercase_ : Optional[int] = create_rules_function(_UpperCamelCase , _UpperCamelCase )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , last_epoch=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=-1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.5 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_UpperCamelCase ) * 2.0 * progress )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = -1 ):
"""simple docstring"""
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
lowercase_ : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_UpperCamelCase ) * progress) % 1.0) )) )
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=1e-7 , _UpperCamelCase=1.0 , _UpperCamelCase=-1 ):
"""simple docstring"""
lowercase_ : Dict = optimizer.defaults["lr"]
if not (lr_init > lr_end):
raise ValueError(F"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(_UpperCamelCase ):
if current_step < num_warmup_steps:
return float(_UpperCamelCase ) / float(max(1 , _UpperCamelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowercase_ : int = lr_init - lr_end
lowercase_ : Optional[int] = num_training_steps - num_warmup_steps
lowercase_ : Optional[Any] = 1 - (current_step - num_warmup_steps) / decay_steps
lowercase_ : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 1 , _UpperCamelCase = 1.0 , _UpperCamelCase = -1 , ):
"""simple docstring"""
lowercase_ : Any = SchedulerType(_UpperCamelCase )
lowercase_ : List[Any] = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_UpperCamelCase , last_epoch=_UpperCamelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_UpperCamelCase , step_rules=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_UpperCamelCase , num_warmup_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , num_cycles=_UpperCamelCase , last_epoch=_UpperCamelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , power=_UpperCamelCase , last_epoch=_UpperCamelCase , )
return schedule_func(
_UpperCamelCase , num_warmup_steps=_UpperCamelCase , num_training_steps=_UpperCamelCase , last_epoch=_UpperCamelCase )
| 640 | 0 |
import math
from numpy import inf
from scipy.integrate import quad
def UpperCAmelCase__ ( lowerCamelCase_ : float ):
if num <= 0:
raise ValueError('math domain error' )
return quad(lowerCamelCase_ , 0 , lowerCamelCase_ , args=(lowerCamelCase_) )[0]
def UpperCAmelCase__ ( lowerCamelCase_ : float , lowerCamelCase_ : float ):
return math.pow(lowerCamelCase_ , z - 1 ) * math.exp(-x )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 47 |
"""simple docstring"""
import random
def _lowerCamelCase ( lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : str ):
lowercase__ : List[Any] = a[left_index]
lowercase__ : List[Any] = left_index + 1
for j in range(left_index + 1 , lowerCamelCase__ ):
if a[j] < pivot:
lowercase__ , lowercase__ : Dict = a[i], a[j]
i += 1
lowercase__ , lowercase__ : Any = a[i - 1], a[left_index]
return i - 1
def _lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[int] ):
if left < right:
lowercase__ : str = random.randint(lowerCamelCase__ , right - 1 )
lowercase__ , lowercase__ : Any = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
lowercase__ : str = partition(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
quick_sort_random(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowerCamelCase__ , pivot_index + 1 , lowerCamelCase__ ) # recursive quicksort to the right of the pivot point
def _lowerCamelCase ( ):
lowercase__ : List[Any] = input("""Enter numbers separated by a comma:\n""" ).strip()
lowercase__ : int = [int(lowerCamelCase__ ) for item in user_input.split(""",""" )]
quick_sort_random(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) )
print(lowerCamelCase__ )
if __name__ == "__main__":
main() | 200 | 0 |
class __lowerCAmelCase : # Public class to implement a graph
def __init__( self : List[Any] , A : int , A : int , A : list[list[bool]]) -> None:
"""simple docstring"""
_UpperCAmelCase = row
_UpperCAmelCase = col
_UpperCAmelCase = graph
def _lowerCamelCase ( self : Tuple , A : int , A : int , A : list[list[bool]]) -> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def _lowerCamelCase ( self : Optional[Any] , A : int , A : int , A : list[list[bool]]) -> None:
"""simple docstring"""
_UpperCAmelCase = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
_UpperCAmelCase = [-1, 0, 1, -1, 1, -1, 0, 1]
_UpperCAmelCase = True # Make those cells visited
for k in range(8):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase__):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase__)
def _lowerCamelCase ( self : int) -> int: # And finally, count all islands.
"""simple docstring"""
_UpperCAmelCase = [[False for j in range(self.COL)] for i in range(self.ROW)]
_UpperCAmelCase = 0
for i in range(self.ROW):
for j in range(self.COL):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
count += 1
return count
| 710 |
from collections import Counter
from timeit import timeit
def A ( _UpperCAmelCase : str = "" , ) -> bool:
'''simple docstring'''
return sum(c % 2 for c in Counter(input_str.replace(' ' , '' ).lower() ).values() ) < 2
def A ( _UpperCAmelCase : str = "" ) -> bool:
'''simple docstring'''
if len(_UpperCAmelCase ) == 0:
return True
_UpperCAmelCase = input_str.replace(' ' , '' ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(_UpperCAmelCase , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def A ( _UpperCAmelCase : str = "" ) -> None:
'''simple docstring'''
print('\nFor string = ' , _UpperCAmelCase , ':' )
print(
'> can_string_be_rearranged_as_palindrome_counter()' , '\tans =' , can_string_be_rearranged_as_palindrome_counter(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome_counter(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
print(
'> can_string_be_rearranged_as_palindrome()' , '\tans =' , can_string_be_rearranged_as_palindrome(_UpperCAmelCase ) , '\ttime =' , timeit(
'z.can_string_be_rearranged_as_palindrome(z.check_str)' , setup='import __main__ as z' , ) , 'seconds' , )
if __name__ == "__main__":
UpperCAmelCase__ = input(
"Enter string to determine if it can be rearranged as a palindrome or not: "
).strip()
benchmark(check_str)
UpperCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"""{check_str} can {"" if status else "not "}be rearranged as a palindrome""")
| 639 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowercase : Any = logging.getLogger(__name__)
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : str
__lowerCamelCase : str
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
__lowerCamelCase : Optional[str] = None
@dataclass(frozen=snake_case )
class _A :
'''simple docstring'''
__lowerCamelCase : List[int]
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[List[int]] = None
__lowerCamelCase : Optional[Union[int, float]] = None
__lowerCamelCase : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( snake_case ):
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : str = hans_processors[task]()
snake_case : str = os.path.join(
SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,)
snake_case : Dict = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : List[Any] = label_list[2], label_list[1]
snake_case : List[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case : Any = cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE_ ):
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
snake_case : Union[str, Any] = (
processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
)
logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) )
snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ )
torch.save(self.features ,SCREAMING_SNAKE_CASE_ )
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
'''simple docstring'''
__lowerCamelCase : List[InputFeatures]
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
'''simple docstring'''
snake_case : Any = hans_processors[task]()
snake_case : List[str] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case , snake_case : int = label_list[2], label_list[1]
snake_case : List[str] = label_list
snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case : Any = tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE_ ,(
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) ,(
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) ,)
def snake_case_ ( self ):
'''simple docstring'''
return self.dataset
def __len__( self ):
'''simple docstring'''
return len(self.features )
def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self.features[i]
def snake_case_ ( self ):
'''simple docstring'''
return self.label_list
class _A ( snake_case ):
'''simple docstring'''
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_train_set.txt""" ) ) ,"""train""" )
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(SCREAMING_SNAKE_CASE_ ,"""heuristics_evaluation_set.txt""" ) ) ,"""dev""" )
def snake_case_ ( self ):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
snake_case : List[str] = []
for i, line in enumerate(SCREAMING_SNAKE_CASE_ ):
if i == 0:
continue
snake_case : Any = """%s-%s""" % (set_type, line[0])
snake_case : Optional[int] = line[5]
snake_case : Union[str, Any] = line[6]
snake_case : Optional[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
snake_case : Dict = line[0]
examples.append(InputExample(guid=SCREAMING_SNAKE_CASE_ ,text_a=SCREAMING_SNAKE_CASE_ ,text_b=SCREAMING_SNAKE_CASE_ ,label=SCREAMING_SNAKE_CASE_ ,pairID=SCREAMING_SNAKE_CASE_ ) )
return examples
def lowercase ( __A : List[InputExample] , __A : List[str] , __A : int , __A : PreTrainedTokenizer , ) -> Tuple:
'''simple docstring'''
snake_case : List[Any] = {label: i for i, label in enumerate(__A )}
snake_case : Union[str, Any] = []
for ex_index, example in tqdm.tqdm(enumerate(__A ) , desc="""convert examples to features""" ):
if ex_index % 1_0000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
snake_case : Union[str, Any] = tokenizer(
example.text_a , example.text_b , add_special_tokens=__A , max_length=__A , padding="""max_length""" , truncation=__A , return_overflowing_tokens=__A , )
snake_case : Tuple = label_map[example.label] if example.label in label_map else 0
snake_case : Tuple = int(example.pairID )
features.append(InputFeatures(**__A , label=__A , pairID=__A ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f"""guid: {example}""" )
logger.info(f"""features: {features[i]}""" )
return features
__lowercase : Dict = {
'''hans''': 3,
}
__lowercase : Union[str, Any] = {
'''hans''': HansProcessor,
}
| 36 |
class snake_case_ :
'''simple docstring'''
def __init__( self : Dict , __lowerCamelCase : int , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[Any]=None ) -> Dict:
'''simple docstring'''
__lowercase = data
__lowercase = previous
__lowercase = next_node
def __str__( self : int ) -> str:
'''simple docstring'''
return F"{self.data}"
def UpperCAmelCase ( self : int ) -> int:
'''simple docstring'''
return self.data
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
return self.next
def UpperCAmelCase ( self : Optional[int] ) -> Any:
'''simple docstring'''
return self.previous
class snake_case_ :
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
__lowercase = head
def __iter__( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
return self
def UpperCAmelCase ( self : str ) -> str:
'''simple docstring'''
if not self.current:
raise StopIteration
else:
__lowercase = self.current.get_data()
__lowercase = self.current.get_next()
return value
class snake_case_ :
'''simple docstring'''
def __init__( self : List[str] ) -> Any:
'''simple docstring'''
__lowercase = None # First node in list
__lowercase = None # Last node in list
def __str__( self : int ) -> Any:
'''simple docstring'''
__lowercase = self.head
__lowercase = []
while current is not None:
nodes.append(current.get_data() )
__lowercase = current.get_next()
return " ".join(str(__lowerCamelCase ) for node in nodes )
def __contains__( self : Tuple , __lowerCamelCase : int ) -> List[Any]:
'''simple docstring'''
__lowercase = self.head
while current:
if current.get_data() == value:
return True
__lowercase = current.get_next()
return False
def __iter__( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return LinkedListIterator(self.head )
def UpperCAmelCase ( self : Any ) -> str:
'''simple docstring'''
if self.head:
return self.head.get_data()
return None
def UpperCAmelCase ( self : str ) -> Union[str, Any]:
'''simple docstring'''
if self.tail:
return self.tail.get_data()
return None
def UpperCAmelCase ( self : str , __lowerCamelCase : Node ) -> None:
'''simple docstring'''
if self.head is None:
__lowercase = node
__lowercase = node
else:
self.insert_before_node(self.head , __lowerCamelCase )
def UpperCAmelCase ( self : Dict , __lowerCamelCase : Node ) -> None:
'''simple docstring'''
if self.head is None:
self.set_head(__lowerCamelCase )
else:
self.insert_after_node(self.tail , __lowerCamelCase )
def UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ) -> None:
'''simple docstring'''
__lowercase = Node(__lowerCamelCase )
if self.head is None:
self.set_head(__lowerCamelCase )
else:
self.set_tail(__lowerCamelCase )
def UpperCAmelCase ( self : int , __lowerCamelCase : Node , __lowerCamelCase : Node ) -> None:
'''simple docstring'''
__lowercase = node
__lowercase = node.previous
if node.get_previous() is None:
__lowercase = node_to_insert
else:
__lowercase = node_to_insert
__lowercase = node_to_insert
def UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Node , __lowerCamelCase : Node ) -> None:
'''simple docstring'''
__lowercase = node
__lowercase = node.next
if node.get_next() is None:
__lowercase = node_to_insert
else:
__lowercase = node_to_insert
__lowercase = node_to_insert
def UpperCAmelCase ( self : List[str] , __lowerCamelCase : int , __lowerCamelCase : int ) -> None:
'''simple docstring'''
__lowercase = 1
__lowercase = Node(__lowerCamelCase )
__lowercase = self.head
while node:
if current_position == position:
self.insert_before_node(__lowerCamelCase , __lowerCamelCase )
return
current_position += 1
__lowercase = node.next
self.insert_after_node(self.tail , __lowerCamelCase )
def UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : int ) -> Node:
'''simple docstring'''
__lowercase = self.head
while node:
if node.get_data() == item:
return node
__lowercase = node.get_next()
raise Exception('Node not found' )
def UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : List[str] ) -> Dict:
'''simple docstring'''
if (node := self.get_node(__lowerCamelCase )) is not None:
if node == self.head:
__lowercase = self.head.get_next()
if node == self.tail:
__lowercase = self.tail.get_previous()
self.remove_node_pointers(__lowerCamelCase )
@staticmethod
def UpperCAmelCase ( __lowerCamelCase : Node ) -> None:
'''simple docstring'''
if node.get_next():
__lowercase = node.previous
if node.get_previous():
__lowercase = node.next
__lowercase = None
__lowercase = None
def UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
return self.head is None
def SCREAMING_SNAKE_CASE ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 375 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : int = "pegasus"
a__ : List[str] = ["past_key_values"]
a__ : Tuple = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : str , _lowercase : Optional[Any]=5_02_65 , _lowercase : Union[str, Any]=10_24 , _lowercase : Tuple=12 , _lowercase : int=40_96 , _lowercase : Dict=16 , _lowercase : int=12 , _lowercase : str=40_96 , _lowercase : Union[str, Any]=16 , _lowercase : Dict=0.0 , _lowercase : List[Any]=0.0 , _lowercase : Optional[int]=True , _lowercase : Any=True , _lowercase : Optional[Any]="gelu" , _lowercase : Dict=10_24 , _lowercase : Any=0.1 , _lowercase : int=0.0 , _lowercase : Any=0.0 , _lowercase : List[Any]=0.02 , _lowercase : List[Any]=0 , _lowercase : Any=False , _lowercase : Optional[Any]=0 , _lowercase : Optional[int]=1 , _lowercase : List[str]=1 , **_lowercase : Any , ):
__UpperCAmelCase = vocab_size
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = d_model
__UpperCAmelCase = encoder_ffn_dim
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = encoder_attention_heads
__UpperCAmelCase = decoder_ffn_dim
__UpperCAmelCase = decoder_layers
__UpperCAmelCase = decoder_attention_heads
__UpperCAmelCase = dropout
__UpperCAmelCase = attention_dropout
__UpperCAmelCase = activation_dropout
__UpperCAmelCase = activation_function
__UpperCAmelCase = init_std
__UpperCAmelCase = encoder_layerdrop
__UpperCAmelCase = decoder_layerdrop
__UpperCAmelCase = use_cache
__UpperCAmelCase = encoder_layers
__UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
@property
def a ( self : Union[str, Any] ):
return self.encoder_attention_heads
@property
def a ( self : Tuple ):
return self.d_model
| 397 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Tuple = logging.get_logger(__name__)
_lowercase : Optional[int] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : int = "realm"
def __init__( self : Optional[int] , _lowercase : Tuple=3_05_22 , _lowercase : List[str]=7_68 , _lowercase : Tuple=1_28 , _lowercase : int=12 , _lowercase : Tuple=12 , _lowercase : List[Any]=8 , _lowercase : Tuple=30_72 , _lowercase : Tuple="gelu_new" , _lowercase : str=0.1 , _lowercase : Union[str, Any]=0.1 , _lowercase : Union[str, Any]=5_12 , _lowercase : Optional[int]=2 , _lowercase : Any=0.02 , _lowercase : Union[str, Any]=1E-12 , _lowercase : Dict=2_56 , _lowercase : Optional[int]=10 , _lowercase : List[Any]=1E-3 , _lowercase : Optional[int]=5 , _lowercase : List[str]=3_20 , _lowercase : Optional[int]=13_35_37_18 , _lowercase : List[Any]=50_00 , _lowercase : Dict=1 , _lowercase : int=0 , _lowercase : Any=2 , **_lowercase : Optional[Any] , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
# Common config
__UpperCAmelCase = vocab_size
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = hidden_size
__UpperCAmelCase = retriever_proj_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = num_candidates
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = initializer_range
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = layer_norm_eps
# Reader config
__UpperCAmelCase = span_hidden_size
__UpperCAmelCase = max_span_width
__UpperCAmelCase = reader_layer_norm_eps
__UpperCAmelCase = reader_beam_size
__UpperCAmelCase = reader_seq_len
# Retrieval config
__UpperCAmelCase = num_block_records
__UpperCAmelCase = searcher_beam_size
| 397 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def _lowercase ( __lowerCamelCase : int = 8 ) -> str:
'''simple docstring'''
UpperCamelCase__ : Any = ascii_letters + digits + punctuation
return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
def _lowercase ( __lowerCamelCase : str ,__lowerCamelCase : int ) -> str:
'''simple docstring'''
i -= len(__lowerCamelCase )
UpperCamelCase__ : Optional[int] = i // 3
UpperCamelCase__ : Optional[int] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
UpperCamelCase__ : Optional[int] = (
chars_incl
+ random(__lowerCamelCase ,quotient + remainder )
+ random(__lowerCamelCase ,__lowerCamelCase )
+ random(__lowerCamelCase ,__lowerCamelCase )
)
UpperCamelCase__ : Dict = list(__lowerCamelCase )
shuffle(__lowerCamelCase )
return "".join(__lowerCamelCase )
# random is a generalised function for letters, characters and numbers
def _lowercase ( __lowerCamelCase : str ,__lowerCamelCase : int ) -> str:
'''simple docstring'''
return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
def _lowercase ( __lowerCamelCase : Union[str, Any] ,__lowerCamelCase : int ) -> List[Any]:
'''simple docstring'''
pass # Put your code here...
def _lowercase ( __lowerCamelCase : Union[str, Any] ,__lowerCamelCase : List[str] ) -> str:
'''simple docstring'''
pass # Put your code here...
def _lowercase ( __lowerCamelCase : Tuple ,__lowerCamelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass # Put your code here...
def _lowercase ( __lowerCamelCase : str ,__lowerCamelCase : int = 8 ) -> bool:
'''simple docstring'''
if len(__lowerCamelCase ) < min_length:
# Your Password must be at least 8 characters long
return False
UpperCamelCase__ : int = any(char in ascii_uppercase for char in password )
UpperCamelCase__ : Any = any(char in ascii_lowercase for char in password )
UpperCamelCase__ : Dict = any(char in digits for char in password )
UpperCamelCase__ : int = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def _lowercase ( ) -> Any:
'''simple docstring'''
UpperCamelCase__ : str = int(input('''Please indicate the max length of your password: ''' ).strip() )
UpperCamelCase__ : Optional[int] = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''' ,password_generator(__lowerCamelCase ) )
print(
'''Alternative Password generated:''' ,alternative_password_generator(__lowerCamelCase ,__lowerCamelCase ) ,)
print('''[If you are thinking of using this passsword, You better save it.]''' )
if __name__ == "__main__":
main()
| 344 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class UpperCamelCase__ ( __lowerCamelCase ):
a__ : List[str] = 'philschmid/bart-large-cnn-samsum'
a__ : List[Any] = (
'This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, '
'and returns a summary of the text.'
)
a__ : int = 'summarizer'
a__ : int = AutoTokenizer
a__ : Any = AutoModelForSeqaSeqLM
a__ : Optional[int] = ['text']
a__ : Optional[int] = ['text']
def __lowercase( self : int, __lowerCamelCase : List[str] ) -> List[Any]:
return self.pre_processor(__lowerCamelCase, return_tensors='''pt''', truncation=__lowerCamelCase )
def __lowercase( self : int, __lowerCamelCase : Union[str, Any] ) -> Union[str, Any]:
return self.model.generate(**__lowerCamelCase )[0]
def __lowercase( self : Optional[Any], __lowerCamelCase : Optional[int] ) -> Any:
return self.pre_processor.decode(__lowerCamelCase, skip_special_tokens=__lowerCamelCase, clean_up_tokenization_spaces=__lowerCamelCase )
| 344 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Optional[torch.FloatTensor] = None
__a : torch.FloatTensor = None
__a : Optional[Tuple[torch.FloatTensor]] = None
__a : Optional[Tuple[torch.FloatTensor]] = None
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__=1 , lowerCAmelCase__=0 , lowerCAmelCase__=2 , lowerCAmelCase__=5_12 , lowerCAmelCase__="cls" , lowerCAmelCase__=False , lowerCAmelCase__=True , **lowerCAmelCase__ , ) -> str:
'''simple docstring'''
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
__lowercase = project_dim
__lowercase = pooler_fn
__lowercase = learn_encoder
__lowercase = use_attention_mask
class _UpperCamelCase ( _UpperCAmelCase ):
"""simple docstring"""
__a : Dict = [r'''pooler''', r'''logit_scale''']
__a : Tuple = [r'''position_ids''', r'''predictions.decoder.bias''']
__a : List[str] = '''roberta'''
__a : Union[str, Any] = RobertaSeriesConfig
def __init__( self , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
__lowercase = XLMRobertaModel(lowerCAmelCase__ )
__lowercase = nn.Linear(config.hidden_size , config.project_dim )
__lowercase = getattr(lowerCAmelCase__ , '''has_pre_transformation''' , lowerCAmelCase__ )
if self.has_pre_transformation:
__lowercase = nn.Linear(config.hidden_size , config.project_dim )
__lowercase = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ) -> Optional[Any]:
'''simple docstring'''
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.base_model(
input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , position_ids=lowerCAmelCase__ , head_mask=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , encoder_attention_mask=lowerCAmelCase__ , output_attentions=lowerCAmelCase__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=lowerCAmelCase__ , )
if self.has_pre_transformation:
__lowercase = outputs['''hidden_states'''][-2]
__lowercase = self.pre_LN(lowerCAmelCase__ )
__lowercase = self.transformation_pre(lowerCAmelCase__ )
return TransformationModelOutput(
projection_state=lowerCAmelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
__lowercase = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=lowerCAmelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) | 522 | import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__a : str = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__a : Union[str, Any] = 2_5_0_0_0_4
__a : Optional[int] = 2_5_0_0_2_0
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( _UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : str = MBartTokenizer
__a : int = MBartTokenizerFast
__a : List[Any] = True
__a : int = True
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = MBartTokenizer(lowerCAmelCase__ , keep_accents=lowerCAmelCase__ )
__lowercase = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCAmelCase__ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_85, 46, 10, 1_70, 3_82]] , )
__lowercase = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__lowercase = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
__lowercase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
self.assertListEqual(
lowerCAmelCase__ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__lowercase = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
__lowercase = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
__lowercase = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=True
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
__lowercase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it save with the same files
self.assertSequenceEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
# Save tokenizer rust, legacy_format=False
__lowercase = tempfile.mkdtemp()
__lowercase = tokenizer_r.save_pretrained(lowerCAmelCase__ , legacy_format=lowerCAmelCase__ )
__lowercase = tokenizer_p.save_pretrained(lowerCAmelCase__ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__lowercase = tokenizer_r.from_pretrained(lowerCAmelCase__ )
__lowercase = tokenizer_p.from_pretrained(lowerCAmelCase__ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
shutil.rmtree(lowerCAmelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
__a : Optional[int] = '''facebook/mbart-large-en-ro'''
__a : Any = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
__a : Optional[Any] = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
__a : List[Any] = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls ) -> Any:
'''simple docstring'''
__lowercase = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
__lowercase = 1
return cls
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_00_01 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_00_04 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_00_20 )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
self.assertIn(lowerCAmelCase__ , self.tokenizer.all_special_ids )
__lowercase = [RO_CODE, 8_84, 90_19, 96, 9, 9_16, 8_67_92, 36, 1_87_43, 1_55_96, 5, 2]
__lowercase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
__lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , lowerCAmelCase__ )
__lowercase = 10
__lowercase = self.tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowerCAmelCase__ )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_00_26, 25_00_01] )
def _SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
__lowercase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase__ )
__lowercase = MBartTokenizer.from_pretrained(lowerCAmelCase__ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase__ )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , return_tensors='''pt''' )
__lowercase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__lowercase = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__lowercase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase__ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.tokenizer(self.src_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=3 , return_tensors='''pt''' )
__lowercase = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=10 , return_tensors='''pt''' )
__lowercase = targets['''input_ids''']
__lowercase = shift_tokens_right(lowerCAmelCase__ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(lowerCAmelCase__ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 30_34, 2, 25_00_04]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_00_01,
} , ) | 522 | 1 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
UpperCAmelCase = logging.get_logger(__name__)
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = "linear"
UpperCAmelCase : Any = "cosine"
UpperCAmelCase : str = "cosine_with_restarts"
UpperCAmelCase : Optional[int] = "polynomial"
UpperCAmelCase : str = "constant"
UpperCAmelCase : int = "constant_with_warmup"
UpperCAmelCase : Optional[Any] = "piecewise_constant"
def _snake_case ( _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : int = -1 ) -> int:
"""simple docstring"""
return LambdaLR(_SCREAMING_SNAKE_CASE , lambda _SCREAMING_SNAKE_CASE : 1 , last_epoch=_SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int = -1 ) -> List[str]:
"""simple docstring"""
def lr_lambda(_SCREAMING_SNAKE_CASE : int ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1.0 , _SCREAMING_SNAKE_CASE ) )
return 1.0
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int = -1 ) -> List[str]:
"""simple docstring"""
lowerCAmelCase = {}
lowerCAmelCase = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
lowerCAmelCase, lowerCAmelCase = rule_str.split(""":""" )
lowerCAmelCase = int(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = float(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = value
lowerCAmelCase = float(rule_list[-1] )
def create_rules_function(_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ):
def rule_func(_SCREAMING_SNAKE_CASE : int ) -> float:
lowerCAmelCase = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(_SCREAMING_SNAKE_CASE ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
lowerCAmelCase = create_rules_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str]=-1 ) -> int:
"""simple docstring"""
def lr_lambda(_SCREAMING_SNAKE_CASE : int ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : float = 0.5 , _SCREAMING_SNAKE_CASE : int = -1 ) -> Union[str, Any]:
"""simple docstring"""
def lr_lambda(_SCREAMING_SNAKE_CASE : List[str] ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(_SCREAMING_SNAKE_CASE ) * 2.0 * progress )) )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int = 1 , _SCREAMING_SNAKE_CASE : int = -1 ) -> str:
"""simple docstring"""
def lr_lambda(_SCREAMING_SNAKE_CASE : Optional[int] ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(_SCREAMING_SNAKE_CASE ) * progress) % 1.0) )) )
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str=1E-7 , _SCREAMING_SNAKE_CASE : List[Any]=1.0 , _SCREAMING_SNAKE_CASE : Any=-1 ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f'lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})' )
def lr_lambda(_SCREAMING_SNAKE_CASE : int ):
if current_step < num_warmup_steps:
return float(_SCREAMING_SNAKE_CASE ) / float(max(1 , _SCREAMING_SNAKE_CASE ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
lowerCAmelCase = lr_init - lr_end
lowerCAmelCase = num_training_steps - num_warmup_steps
lowerCAmelCase = 1 - (current_step - num_warmup_steps) / decay_steps
lowerCAmelCase = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def _snake_case ( _SCREAMING_SNAKE_CASE : Union[str, SchedulerType] , _SCREAMING_SNAKE_CASE : Optimizer , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : Optional[int] = None , _SCREAMING_SNAKE_CASE : int = 1 , _SCREAMING_SNAKE_CASE : float = 1.0 , _SCREAMING_SNAKE_CASE : int = -1 , ) -> int:
"""simple docstring"""
lowerCAmelCase = SchedulerType(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(_SCREAMING_SNAKE_CASE , step_rules=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , num_cycles=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , power=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE , )
return schedule_func(
_SCREAMING_SNAKE_CASE , num_warmup_steps=_SCREAMING_SNAKE_CASE , num_training_steps=_SCREAMING_SNAKE_CASE , last_epoch=_SCREAMING_SNAKE_CASE ) | 433 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(
_lowerCAmelCase , R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
def __snake_case ( self , A_ ) -> np.ndarray:
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A_ )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def __snake_case ( self , A_ ) -> np.ndarray:
lowerCAmelCase = self.get_masked_index(A_ )
lowerCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def __snake_case ( self , A_ ) -> str:
if isinstance(A_ , A_ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(A_ )
def __snake_case ( self , A_ , A_=None , **A_ ) -> Dict[str, GenericTensor]:
if return_tensors is None:
lowerCAmelCase = self.framework
lowerCAmelCase = self.tokenizer(A_ , return_tensors=A_ )
self.ensure_exactly_one_mask_token(A_ )
return model_inputs
def __snake_case ( self , A_ ) -> str:
lowerCAmelCase = self.model(**A_ )
lowerCAmelCase = model_inputs["""input_ids"""]
return model_outputs
def __snake_case ( self , A_ , A_=5 , A_=None ) -> List[str]:
# Cap top_k if there are targets
if target_ids is not None and target_ids.shape[0] < top_k:
lowerCAmelCase = target_ids.shape[0]
lowerCAmelCase = model_outputs["""input_ids"""][0]
lowerCAmelCase = model_outputs["""logits"""]
if self.framework == "tf":
lowerCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
lowerCAmelCase = outputs.numpy()
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = stable_softmax(A_ , axis=-1 )
if target_ids is not None:
lowerCAmelCase = tf.gather_nd(tf.squeeze(A_ , 0 ) , target_ids.reshape(-1 , 1 ) )
lowerCAmelCase = tf.expand_dims(A_ , 0 )
lowerCAmelCase = tf.math.top_k(A_ , k=A_ )
lowerCAmelCase, lowerCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
lowerCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=A_ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
lowerCAmelCase = outputs[0, masked_index, :]
lowerCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
lowerCAmelCase = probs[..., target_ids]
lowerCAmelCase, lowerCAmelCase = probs.topk(A_ )
lowerCAmelCase = []
lowerCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
lowerCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
lowerCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
lowerCAmelCase = target_ids[p].tolist()
lowerCAmelCase = p
# Filter padding out:
lowerCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
lowerCAmelCase = self.tokenizer.decode(A_ , skip_special_tokens=A_ )
lowerCAmelCase = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(A_ )
result.append(A_ )
if single_mask:
return result[0]
return result
def __snake_case ( self , A_ , A_=None ) -> Optional[int]:
if isinstance(A_ , A_ ):
lowerCAmelCase = [targets]
try:
lowerCAmelCase = self.tokenizer.get_vocab()
except Exception:
lowerCAmelCase = {}
lowerCAmelCase = []
for target in targets:
lowerCAmelCase = vocab.get(A_ , A_ )
if id_ is None:
lowerCAmelCase = self.tokenizer(
A_ , add_special_tokens=A_ , return_attention_mask=A_ , return_token_type_ids=A_ , max_length=1 , truncation=A_ , )["""input_ids"""]
if len(A_ ) == 0:
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
lowerCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
f'The specified target token `{target}` does not exist in the model vocabulary. '
f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
lowerCAmelCase = list(set(A_ ) )
if len(A_ ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
lowerCAmelCase = np.array(A_ )
return target_ids
def __snake_case ( self , A_=None , A_=None ) -> List[Any]:
lowerCAmelCase = {}
if targets is not None:
lowerCAmelCase = self.get_target_ids(A_ , A_ )
lowerCAmelCase = target_ids
if top_k is not None:
lowerCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__( self , A_ , *A_ , **A_ ) -> List[str]:
lowerCAmelCase = super().__call__(A_ , **A_ )
if isinstance(A_ , A_ ) and len(A_ ) == 1:
return outputs[0]
return outputs | 433 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 701 |
from pathlib import Path
import numpy as np
from PIL import Image
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
__lowercase , __lowercase , __lowercase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
return (gray > 127) & (gray <= 255)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : np.ndarray ) -> np.ndarray:
__lowercase = np.zeros_like(SCREAMING_SNAKE_CASE )
__lowercase = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
__lowercase = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
__lowercase = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
__lowercase = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
SCREAMING_SNAKE_CASE__ = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
SCREAMING_SNAKE_CASE__ = np.array(Image.open(lena_path))
# kernel to be applied
SCREAMING_SNAKE_CASE__ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
SCREAMING_SNAKE_CASE__ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
SCREAMING_SNAKE_CASE__ = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 688 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
_lowercase = tuple[int, int]
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowerCAmelCase__ : set[int] ,lowerCAmelCase__ : Mapping[EdgeT, int] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = vertices
lowerCAmelCase_ : List[Any] = {
(min(_lowercase ), max(_lowercase )): weight for edge, weight in edges.items()
}
def UpperCAmelCase_ ( self : Union[str, Any] ,lowerCAmelCase__ : EdgeT ,lowerCAmelCase__ : int ) -> List[str]:
'''simple docstring'''
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
lowerCAmelCase_ : Dict = weight
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = Graph({min(self.vertices )} ,{} )
lowerCAmelCase_ : Union[str, Any] = 42
lowerCAmelCase_ : Dict = 42
lowerCAmelCase_ : int = 42
lowerCAmelCase_ : str = 42
while len(subgraph.vertices ) < len(self.vertices ):
lowerCAmelCase_ : Tuple = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
lowerCAmelCase_ : Optional[int] = edge
lowerCAmelCase_ : Any = weight
subgraph.add_edge(_lowercase ,_lowercase )
return subgraph
def UpperCamelCase ( snake_case__ = "p107_network.txt"):
lowerCAmelCase_ : int = os.path.abspath(os.path.dirname(UpperCamelCase__))
lowerCAmelCase_ : int = os.path.join(UpperCamelCase__ , UpperCamelCase__)
lowerCAmelCase_ : Dict = {}
lowerCAmelCase_ : List[Any] = 42
lowerCAmelCase_ : Union[str, Any] = 42
lowerCAmelCase_ : List[str] = 42
with open(UpperCamelCase__) as f:
lowerCAmelCase_ : Optional[Any] = f.read().strip().split("\n")
lowerCAmelCase_ : Union[str, Any] = [line.split(",") for line in data]
for edgea in range(1 , len(UpperCamelCase__)):
for edgea in range(UpperCamelCase__):
if adjaceny_matrix[edgea][edgea] != "-":
lowerCAmelCase_ : Optional[Any] = int(adjaceny_matrix[edgea][edgea])
lowerCAmelCase_ : str = Graph(set(range(len(UpperCamelCase__))) , UpperCamelCase__)
lowerCAmelCase_ : int = graph.prims_algorithm()
lowerCAmelCase_ : Tuple = sum(graph.edges.values())
lowerCAmelCase_ : Union[str, Any] = sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
print(f"{solution() = }")
| 659 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : List[str] = logging.get_logger(__name__)
class lowerCamelCase__ ( UpperCAmelCase_ ):
lowerCAmelCase = ["""pixel_values"""]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 255 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Any , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
A = get_size_dict(_lowercase , default_to_square=_lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(_lowercase , size=size['shortest_edge'] , default_to_square=_lowercase )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
A = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(_lowercase , size=(size['height'], size['width']) , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : float , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : int , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def __a ( self : Any , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **_lowercase : Any , ):
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(_lowercase , default_to_square=_lowercase )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowercase , param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
A = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
A = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
A = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
A = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
A = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def __a ( self : int , _lowercase : List[str] , _lowercase : List[Tuple] = None ):
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(_lowercase ):
A = target_sizes.numpy()
A = []
for idx in range(len(_lowercase ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='bilinear' , align_corners=_lowercase )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 690 | 0 |
from __future__ import annotations
A_ = 1.6_0_2_1e-1_9 # units = C
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, )-> Optional[Any]:
"""simple docstring"""
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 | from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __UpperCAmelCase ( UpperCAmelCase )-> bool:
"""simple docstring"""
lowercase = int(number**0.5 )
return number == sq * sq
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )-> tuple[int, int]:
"""simple docstring"""
lowercase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowercase = x_den * y_den * z_den
lowercase = gcd(UpperCAmelCase, UpperCAmelCase )
top //= hcf
bottom //= hcf
return top, bottom
def __UpperCAmelCase ( UpperCAmelCase = 35 )-> int:
"""simple docstring"""
lowercase = set()
lowercase = 42
lowercase = Fraction(0 )
lowercase = 42
for x_num in range(1, order + 1 ):
for x_den in range(x_num + 1, order + 1 ):
for y_num in range(1, order + 1 ):
for y_den in range(y_num + 1, order + 1 ):
# n=1
lowercase = x_num * y_den + x_den * y_num
lowercase = x_den * y_den
lowercase = gcd(UpperCAmelCase, UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
unique_s.add(UpperCAmelCase )
# n=2
lowercase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowercase = x_den * x_den * y_den * y_den
if is_sq(UpperCAmelCase ) and is_sq(UpperCAmelCase ):
lowercase = int(sqrt(UpperCAmelCase ) )
lowercase = int(sqrt(UpperCAmelCase ) )
lowercase = gcd(UpperCAmelCase, UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
unique_s.add(UpperCAmelCase )
# n=-1
lowercase = x_num * y_num
lowercase = x_den * y_num + x_num * y_den
lowercase = gcd(UpperCAmelCase, UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
unique_s.add(UpperCAmelCase )
# n=2
lowercase = x_num * x_num * y_num * y_num
lowercase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(UpperCAmelCase ) and is_sq(UpperCAmelCase ):
lowercase = int(sqrt(UpperCAmelCase ) )
lowercase = int(sqrt(UpperCAmelCase ) )
lowercase = gcd(UpperCAmelCase, UpperCAmelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase = add_three(
UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
unique_s.add(UpperCAmelCase )
for num, den in unique_s:
total += Fraction(UpperCAmelCase, UpperCAmelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(F"{solution() = }")
| 479 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Optional[int] ="timm_backbone"
def __init__( self ,_snake_case=None ,_snake_case=3 ,_snake_case=True ,_snake_case=True ,_snake_case=None ,**_snake_case ,):
super().__init__(**_snake_case )
UpperCAmelCase_ : Tuple = backbone
UpperCAmelCase_ : List[str] = num_channels
UpperCAmelCase_ : str = features_only
UpperCAmelCase_ : int = use_pretrained_backbone
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : str = out_indices if out_indices is not None else (-1,)
| 71 |
'''simple docstring'''
from math import factorial
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if n < k or k < 0:
raise ValueError("Please enter positive integers for n and k where n >= k" )
return factorial(_SCREAMING_SNAKE_CASE ) // (factorial(_SCREAMING_SNAKE_CASE ) * factorial(n - k ))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
f"""fifty-two card deck is: {combinations(52, 5)}\n""",
)
print(
"""If a class of 40 students must be arranged into groups of""",
f"""4 for group projects, there are {combinations(40, 4)} ways""",
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
f"""are {combinations(10, 3)} ways that first, second and""",
"""third place can be awarded.""",
)
| 71 | 1 |
'''simple docstring'''
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : Any ) -> Dict:
"""simple docstring"""
super().__init__()
lowerCamelCase_ : Any = nn.Linear(3 , 4 )
lowerCamelCase_ : str = nn.BatchNormad(4 )
lowerCamelCase_ : Any = nn.Linear(4 , 5 )
def __UpperCamelCase ( self : List[str] , UpperCamelCase_ : int ) -> int:
"""simple docstring"""
return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_ ) ) )
class lowerCAmelCase__ ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : List[Any] = ModelForTest()
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , model.state_dict() )
lowerCamelCase_ : List[str] = os.path.join(UpperCamelCase_ , '''index.json''' )
self.assertTrue(os.path.isfile(UpperCamelCase_ ) )
# TODO: add tests on what is inside the index
for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
lowerCamelCase_ : List[str] = os.path.join(UpperCamelCase_ , F"""{key}.dat""" )
self.assertTrue(os.path.isfile(UpperCamelCase_ ) )
# TODO: add tests on the fact weights are properly loaded
def __UpperCamelCase ( self : int ) -> Any:
"""simple docstring"""
lowerCamelCase_ : Any = [torch.floataa, torch.floataa, torch.bfloataa]
for dtype in dtypes:
lowerCamelCase_ : int = torch.randn(2 , 3 , dtype=UpperCamelCase_ )
with TemporaryDirectory() as tmp_dir:
lowerCamelCase_ : Tuple = offload_weight(UpperCamelCase_ , '''weight''' , UpperCamelCase_ , {} )
lowerCamelCase_ : Optional[Any] = os.path.join(UpperCamelCase_ , '''weight.dat''' )
self.assertTrue(os.path.isfile(UpperCamelCase_ ) )
self.assertDictEqual(UpperCamelCase_ , {'''weight''': {'''shape''': [2, 3], '''dtype''': str(UpperCamelCase_ ).split('''.''' )[1]}} )
lowerCamelCase_ : Any = load_offloaded_weight(UpperCamelCase_ , index['''weight'''] )
self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_ ) )
def __UpperCamelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = ModelForTest()
lowerCamelCase_ : Tuple = model.state_dict()
lowerCamelCase_ : List[Any] = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
lowerCamelCase_ : Optional[Any] = {k: v for k, v in state_dict.items() if '''linear2''' in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key] ) )
lowerCamelCase_ : Any = {k: v for k, v in state_dict.items() if '''weight''' in k}
lowerCamelCase_ : Tuple = {k: v for k, v in state_dict.items() if '''weight''' not in k}
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key] ) )
with TemporaryDirectory() as tmp_dir:
offload_state_dict(UpperCamelCase_ , UpperCamelCase_ )
# Duplicates are removed
lowerCamelCase_ : Union[str, Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_ )
# Every key is there with the right value
self.assertEqual(sorted(UpperCamelCase_ ) , sorted(state_dict.keys() ) )
for key, param in state_dict.items():
self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key] ) )
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : Any = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
lowerCamelCase_ : Dict = extract_submodules_state_dict(UpperCamelCase_ , ['''a.1''', '''a.2'''] )
self.assertDictEqual(UpperCamelCase_ , {'''a.1''': 0, '''a.2''': 2} )
lowerCamelCase_ : List[Any] = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
lowerCamelCase_ : int = extract_submodules_state_dict(UpperCamelCase_ , ['''a.1''', '''a.2'''] )
self.assertDictEqual(UpperCamelCase_ , {'''a.1.a''': 0, '''a.2.a''': 2} )
| 711 |
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
__lowerCamelCase : Tuple = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=__UpperCAmelCase )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
lowerCamelCase_ : str = tmp_path_factory.getbasetemp() / '''cache'''
lowerCamelCase_ : Optional[int] = test_hf_cache_home / '''datasets'''
lowerCamelCase_ : List[Any] = test_hf_cache_home / '''metrics'''
lowerCamelCase_ : Tuple = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(__UpperCAmelCase ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(__UpperCAmelCase ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(__UpperCAmelCase ) )
lowerCamelCase_ : Dict = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(__UpperCAmelCase ) )
lowerCamelCase_ : Any = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(__UpperCAmelCase ) )
@pytest.fixture(autouse=__UpperCAmelCase , scope='''session''' )
def __snake_case ():
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=__UpperCAmelCase )
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
# don't take tests into account when counting downloads
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , __UpperCAmelCase )
@pytest.fixture
def __snake_case (__UpperCAmelCase ):
"""simple docstring"""
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , __UpperCAmelCase )
| 418 | 0 |
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class __lowercase ( _lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "philschmid/bart-large-cnn-samsum"
SCREAMING_SNAKE_CASE : List[str] = (
"This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
"and returns a summary of the text."
)
SCREAMING_SNAKE_CASE : Optional[int] = "summarizer"
SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer
SCREAMING_SNAKE_CASE : int = AutoModelForSeqaSeqLM
SCREAMING_SNAKE_CASE : List[str] = ["text"]
SCREAMING_SNAKE_CASE : int = ["text"]
def __magic_name__ ( self , A_ )-> Tuple:
return self.pre_processor(A_ , return_tensors='pt' , truncation=A_ )
def __magic_name__ ( self , A_ )-> Dict:
return self.model.generate(**A_ )[0]
def __magic_name__ ( self , A_ )-> Union[str, Any]:
return self.pre_processor.decode(A_ , skip_special_tokens=A_ , clean_up_tokenization_spaces=A_ )
| 605 | '''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : Dict = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = torch.device('cpu')
def __snake_case ( ):
__UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCAmelCase = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
return im
def __snake_case ( lowerCAmelCase : Tuple ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] )
def __snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
__UpperCAmelCase = dct.pop(lowerCAmelCase )
__UpperCAmelCase = val
def __snake_case ( lowerCAmelCase : Optional[int] ):
__UpperCAmelCase = []
for k in state_dict.keys():
__UpperCAmelCase = k
if ".pwconv" in k:
__UpperCAmelCase = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
__UpperCAmelCase = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
__UpperCAmelCase = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
__UpperCAmelCase = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
__UpperCAmelCase = k_new.split('.' )
if ls[2].isdigit():
__UpperCAmelCase = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
__UpperCAmelCase = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def __snake_case ( lowerCAmelCase : Any , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Any ):
__UpperCAmelCase = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__UpperCAmelCase = 1000
__UpperCAmelCase = 'huggingface/label-files'
__UpperCAmelCase = 'imagenet-1k-id2label.json'
__UpperCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) , 'r' ) )
__UpperCAmelCase = {int(lowerCAmelCase ): v for k, v in idalabel.items()}
__UpperCAmelCase = idalabel
__UpperCAmelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__UpperCAmelCase = [3, 3, 6, 4]
__UpperCAmelCase = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__UpperCAmelCase = [3, 3, 9, 6]
__UpperCAmelCase = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__UpperCAmelCase = [4, 3, 10, 5]
__UpperCAmelCase = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__UpperCAmelCase = [4, 4, 12, 6]
__UpperCAmelCase = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
__UpperCAmelCase = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location='cpu' , check_hash=lowerCAmelCase )
else:
__UpperCAmelCase = torch.load(lowerCAmelCase , map_location='cpu' )
__UpperCAmelCase = checkpoint
__UpperCAmelCase = create_rename_keys(lowerCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# load HuggingFace model
__UpperCAmelCase = SwiftFormerForImageClassification(lowerCAmelCase ).eval()
hf_model.load_state_dict(lowerCAmelCase )
# prepare test inputs
__UpperCAmelCase = prepare_img()
__UpperCAmelCase = ViTImageProcessor.from_pretrained('preprocessor_config' )
__UpperCAmelCase = processor(images=lowerCAmelCase , return_tensors='pt' )
# compare outputs from both models
__UpperCAmelCase = get_expected_output(lowerCAmelCase )
__UpperCAmelCase = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , lowerCAmelCase , atol=1E-3 )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 396 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = '▁'
_lowerCamelCase = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
_lowerCamelCase = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
_lowerCamelCase = {'vinai/bartpho-syllable': 1024}
class __A ( lowerCamelCase__ ):
"""simple docstring"""
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ["""input_ids""", """attention_mask"""]
def __init__( self , a__ , a__ , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__ = None , **a__ , ):
"""simple docstring"""
_lowerCamelCase : Tuple = AddedToken(a__ , lstrip=a__ , rstrip=a__) if isinstance(a__ , a__) else mask_token
_lowerCamelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , cls_token=a__ , pad_token=a__ , mask_token=a__ , sp_model_kwargs=self.sp_model_kwargs , **a__ , )
_lowerCamelCase : Optional[Any] = vocab_file
_lowerCamelCase : Tuple = monolingual_vocab_file
_lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(a__))
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : int = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(a__) not in self.fairseq_tokens_to_ids:
_lowerCamelCase : Any = cnt
cnt += 1
with open(a__ , '''r''' , encoding='''utf-8''') as f:
for line in f.readlines():
_lowerCamelCase : Union[str, Any] = line.strip().split()[0]
_lowerCamelCase : List[Any] = len(self.fairseq_tokens_to_ids)
if str(a__) not in self.fairseq_tokens_to_ids:
_lowerCamelCase : int = len(self.fairseq_tokens_to_ids)
_lowerCamelCase : Optional[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = self.__dict__.copy()
_lowerCamelCase : Dict = None
_lowerCamelCase : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , a__):
"""simple docstring"""
_lowerCamelCase : Dict = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs'''):
_lowerCamelCase : List[Any] = {}
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def __snake_case ( self , a__ , a__ = None):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCamelCase : int = [self.cls_token_id]
_lowerCamelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __snake_case ( self , a__ , a__ = None , a__ = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__)
if token_ids_a is None:
return [1] + ([0] * len(a__)) + [1]
return [1] + ([0] * len(a__)) + [1, 1] + ([0] * len(a__)) + [1]
def __snake_case ( self , a__ , a__ = None):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = [self.sep_token_id]
_lowerCamelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def __snake_case ( self):
"""simple docstring"""
return len(self.fairseq_ids_to_tokens)
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : str = {self.convert_ids_to_tokens(a__): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __snake_case ( self , a__):
"""simple docstring"""
return self.sp_model.encode(a__ , out_type=a__)
def __snake_case ( self , a__):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __snake_case ( self , a__):
"""simple docstring"""
return self.fairseq_ids_to_tokens[index]
def __snake_case ( self , a__):
"""simple docstring"""
_lowerCamelCase : List[str] = ''''''.join(a__).replace(a__ , ''' ''').strip()
return out_string
def __snake_case ( self , a__ , a__ = None):
"""simple docstring"""
if not os.path.isdir(a__):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
return
_lowerCamelCase : Optional[int] = os.path.join(
a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
_lowerCamelCase : Tuple = os.path.join(
a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file) != os.path.abspath(a__) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , a__)
elif not os.path.isfile(self.vocab_file):
with open(a__ , '''wb''') as fi:
_lowerCamelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(a__)
if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
a__) and os.path.isfile(self.monolingual_vocab_file):
copyfile(self.monolingual_vocab_file , a__)
elif not os.path.isfile(self.monolingual_vocab_file):
with open(a__ , '''w''' , encoding='''utf-8''') as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(F"""{str(a__)} \n""")
return out_vocab_file, out_monolingual_vocab_file
| 716 |
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_lowerCamelCase = logging.get_logger(__name__)
class __A ( lowerCamelCase__ ):
"""simple docstring"""
def __init__( self , *a__ , **a__):
"""simple docstring"""
warnings.warn(
'''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use DonutImageProcessor instead.''' , a__ , )
super().__init__(*a__ , **a__)
| 613 | 0 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase ( __a ):
'''simple docstring'''
snake_case = (DDIMParallelScheduler,)
snake_case = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def lowerCamelCase__ ( self : Optional[int] , **__snake_case : Dict ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**lowerCAmelCase__ )
return config
def lowerCamelCase__ ( self : Dict , **__snake_case : Tuple ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.scheduler_classes[0]
lowerCamelCase = self.get_scheduler_config(**lowerCAmelCase__ )
lowerCamelCase = scheduler_class(**lowerCAmelCase__ )
lowerCamelCase , lowerCamelCase = 10, 0.0
lowerCamelCase = self.dummy_model()
lowerCamelCase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase__ )
for t in scheduler.timesteps:
lowerCamelCase = model(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCamelCase = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
return sample
def lowerCamelCase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ )
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowerCAmelCase__ )
lowerCamelCase = self.scheduler_classes[0]
lowerCamelCase = self.get_scheduler_config(steps_offset=1 )
lowerCamelCase = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def lowerCamelCase__ ( self : int ) -> Any:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ )
def lowerCamelCase__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowerCAmelCase__ )
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCAmelCase__ )
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowerCAmelCase__ )
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowerCAmelCase__ )
def lowerCamelCase__ ( self : Any ) -> Any:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowerCAmelCase__ )
def lowerCamelCase__ ( self : List[str] ) -> Any:
'''simple docstring'''
self.check_over_configs(thresholding=lowerCAmelCase__ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowerCAmelCase__ , prediction_type=lowerCAmelCase__ , sample_max_value=lowerCAmelCase__ , )
def lowerCamelCase__ ( self : str ) -> str:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowerCAmelCase__ )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowerCAmelCase__ , num_inference_steps=lowerCAmelCase__ )
def lowerCamelCase__ ( self : List[str] ) -> str:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowerCAmelCase__ , eta=lowerCAmelCase__ )
def lowerCamelCase__ ( self : str ) -> int:
'''simple docstring'''
lowerCamelCase = self.scheduler_classes[0]
lowerCamelCase = self.get_scheduler_config()
lowerCamelCase = scheduler_class(**lowerCAmelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_4771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_2460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_0979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowerCamelCase = self.scheduler_classes[0]
lowerCamelCase = self.get_scheduler_config()
lowerCamelCase = scheduler_class(**lowerCAmelCase__ )
lowerCamelCase , lowerCamelCase = 10, 0.0
scheduler.set_timesteps(lowerCAmelCase__ )
lowerCamelCase = self.dummy_model()
lowerCamelCase = self.dummy_sample_deter
lowerCamelCase = self.dummy_sample_deter + 0.1
lowerCamelCase = self.dummy_sample_deter - 0.1
lowerCamelCase = samplea.shape[0]
lowerCamelCase = torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCamelCase = torch.arange(lowerCAmelCase__ )[0:3, None].repeat(1 , lowerCAmelCase__ )
lowerCamelCase = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCamelCase = scheduler.batch_step_no_noise(lowerCAmelCase__ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , lowerCAmelCase__ )
lowerCamelCase = torch.sum(torch.abs(lowerCAmelCase__ ) )
lowerCamelCase = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 1147.7904 ) < 1e-2
assert abs(result_mean.item() - 0.4982 ) < 1e-3
def lowerCamelCase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = self.full_loop()
lowerCamelCase = torch.sum(torch.abs(lowerCAmelCase__ ) )
lowerCamelCase = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 172.0067 ) < 1e-2
assert abs(result_mean.item() - 0.22_3967 ) < 1e-3
def lowerCamelCase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
lowerCamelCase = self.full_loop(prediction_type='v_prediction' )
lowerCamelCase = torch.sum(torch.abs(lowerCAmelCase__ ) )
lowerCamelCase = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 52.5302 ) < 1e-2
assert abs(result_mean.item() - 0.0684 ) < 1e-3
def lowerCamelCase__ ( self : List[Any] ) -> int:
'''simple docstring'''
lowerCamelCase = self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.01 )
lowerCamelCase = torch.sum(torch.abs(lowerCAmelCase__ ) )
lowerCamelCase = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 149.8295 ) < 1e-2
assert abs(result_mean.item() - 0.1951 ) < 1e-3
def lowerCamelCase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowerCamelCase = self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.01 )
lowerCamelCase = torch.sum(torch.abs(lowerCAmelCase__ ) )
lowerCamelCase = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_sum.item() - 149.0784 ) < 1e-2
assert abs(result_mean.item() - 0.1941 ) < 1e-3
| 246 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase : Union[str, Any] = logging.getLogger()
def _lowercase ( __UpperCamelCase : int ):
snake_case__ = {}
snake_case__ = os.path.join(__UpperCamelCase , """all_results.json""" )
if os.path.exists(__UpperCamelCase ):
with open(__UpperCamelCase , """r""" ) as f:
snake_case__ = json.load(__UpperCamelCase )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
lowerCAmelCase : str = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class SCREAMING_SNAKE_CASE__ ( __a ):
'''simple docstring'''
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
import xla_spawn
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(lowerCAmelCase__ , """argv""" , lowerCAmelCase__ ):
snake_case__ = time()
xla_spawn.main()
snake_case__ = time()
snake_case__ = get_results(lowerCAmelCase__ )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
import xla_spawn
snake_case__ = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(lowerCAmelCase__ , """argv""" , lowerCAmelCase__ ):
xla_spawn.main()
| 214 | 0 |
'''simple docstring'''
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
lowercase__ : Dict = '''sshleifer/bart-tiny-random'''
lowercase__ : Union[str, Any] = '''patrickvonplaten/t5-tiny-random'''
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return AutoConfig.from_pretrained(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,*__A : List[Any] = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.num_hidden_layers , 1)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,*__A : List[str] = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,*__A : List[Any] = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=_UpperCAmelCase)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,*__A : Tuple = create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=1 , d=1)
self.assertEqual(student.config.encoder_layers , 1)
self.assertEqual(student.config.decoder_layers , 1)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaises(_UpperCAmelCase):
create_student_by_copying_alternating_layers(_UpperCAmelCase , tempfile.mkdtemp() , e=_UpperCAmelCase , d=_UpperCAmelCase) | 338 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase__ : Optional[int] = logging.get_logger(__name__)
lowercase__ : List[str] = {'''vocab_file''': '''spiece.model'''}
lowercase__ : Optional[int] = {
'''vocab_file''': {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model''',
'''google/bigbird-roberta-large''': (
'''https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'''
),
'''google/bigbird-base-trivia-itc''': (
'''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'''
),
}
}
lowercase__ : List[str] = {
'''google/bigbird-roberta-base''': 40_96,
'''google/bigbird-roberta-large''': 40_96,
'''google/bigbird-base-trivia-itc''': 40_96,
}
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = ['''input_ids''', '''attention_mask''']
lowerCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="[MASK]" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase = None , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Union[str, Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else bos_token
__A : List[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else eos_token
__A : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else unk_token
__A : str = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else pad_token
__A : int = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else cls_token
__A : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
__A : Optional[Any] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else mask_token
__A : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__A : Tuple = vocab_file
__A : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_UpperCAmelCase)
@property
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = {self.convert_ids_to_tokens(_UpperCAmelCase): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
'''simple docstring'''
__A : Optional[int] = self.__dict__.copy()
__A : List[str] = None
return state
def __setstate__( self , _UpperCAmelCase):
'''simple docstring'''
__A : Dict = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__A : Tuple = {}
__A : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
return self.sp_model.piece_to_id(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = self.sp_model.IdToPiece(_UpperCAmelCase)
return token
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : str = []
__A : int = ''
__A : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCAmelCase) + token
__A : Dict = True
__A : List[Any] = []
else:
current_sub_tokens.append(_UpperCAmelCase)
__A : Union[str, Any] = False
out_string += self.sp_model.decode(_UpperCAmelCase)
return out_string.strip()
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = True , **_UpperCAmelCase , ):
'''simple docstring'''
__A : Tuple = kwargs.pop('use_source_tokenizer' , _UpperCAmelCase)
__A : str = self.convert_ids_to_tokens(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase)
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__A : Dict = []
__A : Dict = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCAmelCase))
__A : Any = []
sub_texts.append(_UpperCAmelCase)
else:
current_sub_text.append(_UpperCAmelCase)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_UpperCAmelCase))
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
__A : Tuple = re.sub(R' (\[(MASK|SEP)\])' , R'\1' , ' '.join(_UpperCAmelCase))
else:
__A : Any = ''.join(_UpperCAmelCase)
__A : List[str] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__A : str = self.clean_up_tokenization(_UpperCAmelCase)
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if not os.path.isdir(_UpperCAmelCase):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__A : int = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_UpperCAmelCase) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _UpperCAmelCase)
elif not os.path.isfile(self.vocab_file):
with open(_UpperCAmelCase , 'wb') as fi:
__A : Any = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase)
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__A : Union[str, Any] = [self.cls_token_id]
__A : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase)
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase)) + [1]
return [1] + ([0] * len(_UpperCAmelCase)) + [1] + ([0] * len(_UpperCAmelCase)) + [1]
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase = None):
'''simple docstring'''
__A : str = [self.sep_token_id]
__A : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] | 338 | 1 |
'''simple docstring'''
def _lowerCAmelCase ( __snake_case : int = 2_00 ) -> int:
__A : Optional[Any] = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
__A : List[str] = [0] * (pence + 1)
__A : Optional[Any] = 1 # base case: 1 way to make 0 pence
for coin in coins:
for i in range(__snake_case , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82 | 8 |
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
class __lowercase ( A ):
__magic_name__ : Any = '''sequence-classification'''
def __init__( self , a__ ) -> Optional[Any]:
'''simple docstring'''
if type(a__ ) == dict:
A_ = Namespace(**a__ )
A_ = glue_output_modes[hparams.task]
A_ = glue_tasks_num_labels[hparams.task]
super().__init__(a__ , a__ , self.mode )
def lowerCAmelCase_ ( self , **a__ ) -> List[Any]:
'''simple docstring'''
return self.model(**a__ )
def lowerCAmelCase_ ( self , a__ , a__ ) -> Optional[int]:
'''simple docstring'''
A_ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A_ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
A_ = self(**a__ )
A_ = outputs[0]
A_ = self.trainer.lr_schedulers[0]['''scheduler''']
A_ = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def lowerCAmelCase_ ( self ) -> str:
'''simple docstring'''
A_ = self.hparams
A_ = processors[args.task]()
A_ = processor.get_labels()
for mode in ["train", "dev"]:
A_ = self._feature_file(a__ )
if os.path.exists(a__ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , a__ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
A_ = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
A_ = convert_examples_to_features(
a__ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , a__ )
torch.save(a__ , a__ )
def lowerCAmelCase_ ( self , a__ , a__ , a__ = False ) -> DataLoader:
'''simple docstring'''
A_ = '''dev''' if mode == '''test''' else mode
A_ = self._feature_file(a__ )
logger.info('''Loading features from cached file %s''' , a__ )
A_ = torch.load(a__ )
A_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
A_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
A_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
A_ = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
A_ = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(a__ , a__ , a__ , a__ ) , batch_size=a__ , shuffle=a__ , )
def lowerCAmelCase_ ( self , a__ , a__ ) -> Union[str, Any]:
'''simple docstring'''
A_ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
A_ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
A_ = self(**a__ )
A_ , A_ = outputs[:2]
A_ = logits.detach().cpu().numpy()
A_ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCAmelCase_ ( self , a__ ) -> tuple:
'''simple docstring'''
A_ = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
A_ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
A_ = np.argmax(a__ , axis=1 )
elif self.hparams.glue_output_mode == "regression":
A_ = np.squeeze(a__ )
A_ = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
A_ = [[] for _ in range(out_label_ids.shape[0] )]
A_ = [[] for _ in range(out_label_ids.shape[0] )]
A_ = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , a__ , a__ )}
A_ = dict(results.items() )
A_ = results
return ret, preds_list, out_label_list
def lowerCAmelCase_ ( self , a__ ) -> dict:
'''simple docstring'''
A_ , A_ , A_ = self._eval_end(a__ )
A_ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCAmelCase_ ( self , a__ ) -> dict:
'''simple docstring'''
A_ , A_ , A_ = self._eval_end(a__ )
A_ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCAmelCase_ ( a__ , a__ ) -> Dict:
'''simple docstring'''
BaseTransformer.add_model_specific_args(a__ , a__ )
parser.add_argument(
'''--max_seq_length''' , default=1_2_8 , type=a__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=a__ , required=a__ , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=a__ , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def lowerCamelCase_ ( ):
A_ = argparse.ArgumentParser()
add_generic_args(__UpperCamelCase , os.getcwd() )
A_ = GLUETransformer.add_model_specific_args(__UpperCamelCase , os.getcwd() )
A_ = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
A_ = os.path.join(
'''./results''' , F"{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}" , )
os.makedirs(args.output_dir )
A_ = GLUETransformer(__UpperCamelCase )
A_ = generic_train(__UpperCamelCase , __UpperCamelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
A_ = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=__UpperCamelCase ) )
A_ = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__UpperCamelCase )
if __name__ == "__main__":
main() | 141 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : int =logging.get_logger(__name__)
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Any:
_lowerCamelCase : List[Any] = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=['''stage2''', '''stage3''', '''stage4'''] , )
_lowerCamelCase : List[Any] = DetaConfig(
backbone_config=SCREAMING_SNAKE_CASE_ , num_queries=900 , encoder_ffn_dim=2048 , decoder_ffn_dim=2048 , num_feature_levels=5 , assign_first_stage=SCREAMING_SNAKE_CASE_ , with_box_refine=SCREAMING_SNAKE_CASE_ , two_stage=SCREAMING_SNAKE_CASE_ , )
# set labels
_lowerCamelCase : Optional[int] = '''huggingface/label-files'''
if "o365" in model_name:
_lowerCamelCase : List[str] = 366
_lowerCamelCase : Any = '''object365-id2label.json'''
else:
_lowerCamelCase : Tuple = 91
_lowerCamelCase : List[str] = '''coco-detection-id2label.json'''
_lowerCamelCase : Optional[int] = num_labels
_lowerCamelCase : Dict = json.load(open(cached_download(hf_hub_url(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type='''dataset''' ) ) , '''r''' ) )
_lowerCamelCase : int = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
_lowerCamelCase : List[Any] = idalabel
_lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->int:
_lowerCamelCase : Optional[int] = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.reduction.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.weight''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((F'''backbone.0.body.layers.{i}.downsample.norm.bias''', F'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', F'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', F'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', F'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', F'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', F'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', F'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', F'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.weight''', F'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', F'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', F'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', F'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.weight''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.weight''', F'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm2.bias''', F'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->str:
_lowerCamelCase : List[Any] = dct.pop(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Optional[Any] = val
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Any:
_lowerCamelCase : List[str] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_lowerCamelCase : int = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_lowerCamelCase : Tuple = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight''' )
_lowerCamelCase : str = state_dict.pop(F'''backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Optional[Any] = in_proj_weight[:dim, :]
_lowerCamelCase : Tuple = in_proj_bias[: dim]
_lowerCamelCase : Tuple = in_proj_weight[
dim : dim * 2, :
]
_lowerCamelCase : str = in_proj_bias[
dim : dim * 2
]
_lowerCamelCase : str = in_proj_weight[
-dim :, :
]
_lowerCamelCase : str = in_proj_bias[-dim :]
# fmt: on
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]:
# transformer decoder self-attention layers
_lowerCamelCase : str = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
_lowerCamelCase : List[Any] = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
_lowerCamelCase : int = state_dict.pop(F'''transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowerCamelCase : Dict = in_proj_weight[:hidden_size, :]
_lowerCamelCase : int = in_proj_bias[:hidden_size]
_lowerCamelCase : Any = in_proj_weight[
hidden_size : hidden_size * 2, :
]
_lowerCamelCase : List[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_lowerCamelCase : str = in_proj_weight[-hidden_size:, :]
_lowerCamelCase : List[Any] = in_proj_bias[-hidden_size:]
def UpperCamelCase ( ) ->Any:
_lowerCamelCase : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase : str = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->List[str]:
_lowerCamelCase : Tuple = get_deta_config(SCREAMING_SNAKE_CASE_ )
# load original state dict
if model_name == "deta-swin-large":
_lowerCamelCase : Any = hf_hub_download(repo_id='''nielsr/deta-checkpoints''' , filename='''adet_swin_ft.pth''' )
elif model_name == "deta-swin-large-o365":
_lowerCamelCase : Optional[Any] = hf_hub_download(repo_id='''jozhang97/deta-swin-l-o365''' , filename='''deta_swin_pt_o365.pth''' )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
_lowerCamelCase : Dict = torch.load(SCREAMING_SNAKE_CASE_ , map_location='''cpu''' )['''model''']
# original state dict
for name, param in state_dict.items():
print(SCREAMING_SNAKE_CASE_ , param.shape )
# rename keys
_lowerCamelCase : List[Any] = create_rename_keys(SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_swin_q_k_v(SCREAMING_SNAKE_CASE_ , config.backbone_config )
read_in_decoder_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
_lowerCamelCase : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Union[str, Any] = val
if "input_proj" in key:
_lowerCamelCase : Optional[int] = state_dict.pop(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : List[str] = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
_lowerCamelCase : Dict = state_dict.pop(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : List[Any] = val
# finally, create HuggingFace model and load state dict
_lowerCamelCase : Any = DetaForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
model.eval()
_lowerCamelCase : Tuple = '''cuda''' if torch.cuda.is_available() else '''cpu'''
model.to(SCREAMING_SNAKE_CASE_ )
# load image processor
_lowerCamelCase : Optional[Any] = DetaImageProcessor(format='''coco_detection''' )
# verify our conversion on image
_lowerCamelCase : Tuple = prepare_img()
_lowerCamelCase : Optional[Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
_lowerCamelCase : int = encoding['''pixel_values''']
_lowerCamelCase : Optional[int] = model(pixel_values.to(SCREAMING_SNAKE_CASE_ ) )
# verify logits
print('''Logits:''' , outputs.logits[0, :3, :3] )
print('''Boxes:''' , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
_lowerCamelCase : List[Any] = torch.tensor(
[[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]] )
_lowerCamelCase : List[str] = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]] )
elif model_name == "deta-swin-large-o365":
_lowerCamelCase : Optional[Any] = torch.tensor(
[[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]] )
_lowerCamelCase : List[str] = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(SCREAMING_SNAKE_CASE_ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(SCREAMING_SNAKE_CASE_ ) , atol=1e-4 )
print('''Everything ok!''' )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'''Saving PyTorch model and processor to {pytorch_dump_folder_path}...''' )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Push to hub
if push_to_hub:
print('''Pushing model and processor to hub...''' )
model.push_to_hub(F'''jozhang97/{model_name}''' )
processor.push_to_hub(F'''jozhang97/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple =argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
SCREAMING_SNAKE_CASE__ : Any =parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 709 | """simple docstring"""
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ : str =logging.get_logger(__name__)
@dataclass
class _UpperCAmelCase ( a_ ):
"""simple docstring"""
__snake_case = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **_lowercase ) -> Tuple:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
_lowerCamelCase : int = deprecated_arg[3:]
_lowerCamelCase : List[Any] = not kwargs.pop(_lowercase )
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
_lowerCamelCase : Dict = kwargs.pop('''tpu_name''' , self.tpu_name )
_lowerCamelCase : Optional[Any] = kwargs.pop('''device_idx''' , self.device_idx )
_lowerCamelCase : Union[str, Any] = kwargs.pop('''eager_mode''' , self.eager_mode )
_lowerCamelCase : int = kwargs.pop('''use_xla''' , self.use_xla )
super().__init__(**_lowercase )
__snake_case = field(
default=a_ , metadata={"""help""": """Name of TPU"""} , )
__snake_case = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
__snake_case = field(default=a_ , metadata={"""help""": """Benchmark models in eager model."""} )
__snake_case = field(
default=a_ , metadata={
"""help""": """Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."""
} , )
@cached_property
def a__ ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['''tf'''] )
_lowerCamelCase : List[str] = None
if self.tpu:
try:
if self.tpu_name:
_lowerCamelCase : Optional[int] = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
_lowerCamelCase : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
_lowerCamelCase : int = None
return tpu
@cached_property
def a__ ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
requires_backends(self , ['''tf'''] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
_lowerCamelCase : int = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' )
_lowerCamelCase : str = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' )
else:
tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU
_lowerCamelCase : Tuple = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' )
return strategy
@property
def a__ ( self ) -> bool:
requires_backends(self , ['''tf'''] )
return self._setup_tpu is not None
@property
def a__ ( self ) -> "tf.distribute.Strategy":
requires_backends(self , ['''tf'''] )
return self._setup_strategy
@property
def a__ ( self ) -> str:
requires_backends(self , ['''tf'''] )
return tf.config.list_physical_devices('''GPU''' )
@property
def a__ ( self ) -> int:
requires_backends(self , ['''tf'''] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def a__ ( self ) -> bool:
return self.n_gpu > 0
| 558 | 0 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE (_UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__a =LayoutLMTokenizer
__a =LayoutLMTokenizerFast
__a =True
__a =True
def UpperCamelCase__ ( self : Tuple ):
super().setUp()
_a = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase__ ( self : Optional[Any] , **__a : Tuple ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **__a )
def UpperCamelCase__ ( self : Optional[Any] , __a : Dict ):
_a = "UNwant\u00E9d,running"
_a = "unwanted, running"
return input_text, output_text
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.tokenizer_class(self.vocab_file )
_a = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__a , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [7, 4, 5, 10, 8, 9] )
def UpperCamelCase__ ( self : Union[str, Any] ):
pass
| 692 |
"""simple docstring"""
from math import log
from scipy.constants import Boltzmann, physical_constants
SCREAMING_SNAKE_CASE__ = 300 # TEMPERATURE (unit = K)
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float , ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError("""Donor concentration should be positive""" )
elif acceptor_conc <= 0:
raise ValueError("""Acceptor concentration should be positive""" )
elif intrinsic_conc <= 0:
raise ValueError("""Intrinsic concentration should be positive""" )
elif donor_conc <= intrinsic_conc:
raise ValueError(
"""Donor concentration should be greater than intrinsic concentration""" )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
"""Acceptor concentration should be greater than intrinsic concentration""" )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 532 | 0 |
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :List[str] = ''''''
for i in table:
res += inp[i - 1]
return res
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return data[1:] + data[0]
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Dict = ''''''
for i in range(len(a_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = int('''0b''' + data[0] + data[-1] , 2 )
__UpperCamelCase :List[Any] = int('''0b''' + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = message[:4]
__UpperCamelCase :Optional[Any] = message[4:]
__UpperCamelCase :Optional[Any] = apply_table(a_ , a_ )
__UpperCamelCase :Optional[Any] = xor(a_ , a_ )
__UpperCamelCase :str = apply_sbox(a_ , temp[:4] ) # noqa: E741
__UpperCamelCase :int = apply_sbox(a_ , temp[4:] )
__UpperCamelCase :List[Any] = '''0''' * (2 - len(a_ )) + l # noqa: E741
__UpperCamelCase :int = '''0''' * (2 - len(a_ )) + r
__UpperCamelCase :str = apply_table(l + r , a_ )
__UpperCamelCase :str = xor(a_ , a_ )
return temp + right
if __name__ == "__main__":
__lowercase = input('''Enter 10 bit key: ''')
__lowercase = input('''Enter 8 bit message: ''')
__lowercase = [6, 3, 7, 4, 8, 5, 10, 9]
__lowercase = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__lowercase = [2, 4, 3, 1]
__lowercase = [2, 6, 3, 1, 4, 8, 5, 7]
__lowercase = [4, 1, 3, 5, 7, 2, 8, 6]
__lowercase = [4, 1, 2, 3, 2, 3, 4, 1]
__lowercase = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__lowercase = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__lowercase = apply_table(key, paa_table)
__lowercase = temp[:5]
__lowercase = temp[5:]
__lowercase = left_shift(left)
__lowercase = left_shift(right)
__lowercase = apply_table(left + right, pa_table)
__lowercase = left_shift(left)
__lowercase = left_shift(right)
__lowercase = left_shift(left)
__lowercase = left_shift(right)
__lowercase = apply_table(left + right, pa_table)
# encryption
__lowercase = apply_table(message, IP)
__lowercase = function(expansion, sa, sa, keya, temp)
__lowercase = temp[4:] + temp[:4]
__lowercase = function(expansion, sa, sa, keya, temp)
__lowercase = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
__lowercase = apply_table(CT, IP)
__lowercase = function(expansion, sa, sa, keya, temp)
__lowercase = temp[4:] + temp[:4]
__lowercase = function(expansion, sa, sa, keya, temp)
__lowercase = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
| 713 | import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCamelCase_ ( UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : Any = CpmAntTokenizer
a__ : Optional[Any] = False
def UpperCamelCase__ ( self) -> Any:
super().setUp()
__UpperCamelCase :Optional[int] = [
'''<d>''',
'''</d>''',
'''<s>''',
'''</s>''',
'''</_>''',
'''<unk>''',
'''<pad>''',
'''</n>''',
'''我''',
'''是''',
'''C''',
'''P''',
'''M''',
'''A''',
'''n''',
'''t''',
]
__UpperCamelCase :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
@tooslow
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :Tuple = CpmAntTokenizer.from_pretrained('''openbmb/cpm-ant-10b''')
__UpperCamelCase :Dict = '''今天天气真好!'''
__UpperCamelCase :Tuple = ['''今天''', '''天气''', '''真''', '''好''', '''!''']
__UpperCamelCase :Optional[Any] = tokenizer.tokenize(__lowercase)
self.assertListEqual(__lowercase , __lowercase)
__UpperCamelCase :int = '''今天天气真好!'''
__UpperCamelCase :List[str] = [tokenizer.bos_token] + tokens
__UpperCamelCase :List[str] = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase) , __lowercase)
__UpperCamelCase :Dict = tokenizer.decode(__lowercase)
self.assertEqual(__lowercase , __lowercase)
| 452 | 0 |
'''simple docstring'''
a_ : Any = tuple[float, float, float]
a_ : Tuple = tuple[float, float, float]
def __snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad ):
lowerCamelCase_ = end_pointa[0] - end_pointa[0]
lowerCamelCase_ = end_pointa[1] - end_pointa[1]
lowerCamelCase_ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def __snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : Vectorad ):
lowerCamelCase_ = ab[1] * ac[2] - ab[2] * ac[1] # *i
lowerCamelCase_ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
lowerCamelCase_ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def __snake_case ( UpperCAmelCase_ : Vectorad , UpperCAmelCase_ : int ):
return tuple(round(UpperCAmelCase_ , UpperCAmelCase_ ) for x in vector ) == (0, 0, 0)
def __snake_case ( UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : Pointad , UpperCAmelCase_ : int = 10 ):
lowerCamelCase_ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase_ = create_vector(UpperCAmelCase_ , UpperCAmelCase_ )
return is_zero_vector(get_ad_vectors_cross(UpperCAmelCase_ , UpperCAmelCase_ ) , UpperCAmelCase_ )
| 675 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def __snake_case ( UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=1024 , UpperCAmelCase_ : Tuple=1024 , UpperCAmelCase_ : List[Any]=False , **UpperCAmelCase_ : Optional[Any] ):
lowerCamelCase_ = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
lowerCamelCase_ = SeqaSeqDataset(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , type_path="train" , **UpperCAmelCase_ )
lowerCamelCase_ = tok.pad_token_id
def get_lens(UpperCAmelCase_ : List[str] ):
lowerCamelCase_ = tqdm(
DataLoader(UpperCAmelCase_ , batch_size=512 , num_workers=8 , shuffle=UpperCAmelCase_ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
lowerCamelCase_ = []
for batch in dl:
lowerCamelCase_ = batch["input_ids"].ne(UpperCAmelCase_ ).sum(1 ).tolist()
lowerCamelCase_ = batch["labels"].ne(UpperCAmelCase_ ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
max_lens.append(max(UpperCAmelCase_ , UpperCAmelCase_ ) )
else:
max_lens.extend(UpperCAmelCase_ )
return max_lens
lowerCamelCase_ = get_lens(UpperCAmelCase_ )
lowerCamelCase_ = SeqaSeqDataset(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , type_path="val" , **UpperCAmelCase_ )
lowerCamelCase_ = get_lens(UpperCAmelCase_ )
pickle_save(UpperCAmelCase_ , train_ds.len_file )
pickle_save(UpperCAmelCase_ , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 675 | 1 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase :str = logging.get_logger(__name__)
__lowerCAmelCase :Optional[Any] = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class _a( _SCREAMING_SNAKE_CASE ):
lowerCamelCase__ :Optional[int] = "efficientformer"
def __init__( self , __snake_case = [3, 2, 6, 4] , __snake_case = [4_8, 9_6, 2_2_4, 4_4_8] , __snake_case = [True, True, True, True] , __snake_case = 4_4_8 , __snake_case = 3_2 , __snake_case = 4 , __snake_case = 7 , __snake_case = 5 , __snake_case = 8 , __snake_case = 4 , __snake_case = 0.0 , __snake_case = 1_6 , __snake_case = 3 , __snake_case = 3 , __snake_case = 3 , __snake_case = 2 , __snake_case = 1 , __snake_case = 0.0 , __snake_case = 1 , __snake_case = True , __snake_case = True , __snake_case = 1E-5 , __snake_case = "gelu" , __snake_case = 0.02 , __snake_case = 1E-12 , __snake_case = 2_2_4 , __snake_case = 1E-05 , **__snake_case , ) -> None:
'''simple docstring'''
super().__init__(**A_ )
_snake_case : str = hidden_act
_snake_case : Union[str, Any] = hidden_dropout_prob
_snake_case : List[str] = hidden_sizes
_snake_case : List[str] = num_hidden_layers
_snake_case : Dict = num_attention_heads
_snake_case : Tuple = initializer_range
_snake_case : str = layer_norm_eps
_snake_case : Tuple = patch_size
_snake_case : Any = num_channels
_snake_case : List[str] = depths
_snake_case : Optional[int] = mlp_expansion_ratio
_snake_case : Any = downsamples
_snake_case : Optional[int] = dim
_snake_case : List[Any] = key_dim
_snake_case : int = attention_ratio
_snake_case : Tuple = resolution
_snake_case : Dict = pool_size
_snake_case : Tuple = downsample_patch_size
_snake_case : Union[str, Any] = downsample_stride
_snake_case : Tuple = downsample_pad
_snake_case : str = drop_path_rate
_snake_case : Tuple = num_metaad_blocks
_snake_case : List[Any] = distillation
_snake_case : int = use_layer_scale
_snake_case : Optional[int] = layer_scale_init_value
_snake_case : Optional[Any] = image_size
_snake_case : int = batch_norm_eps | 703 |
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="attention" ):
_snake_case : Any = params[F"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
_snake_case : Tuple = params[F"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
_snake_case : str = params[F"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
_snake_case : List[Any] = params[F"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ):
if split_mlp_wi:
_snake_case : List[Any] = params[F"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
_snake_case : Tuple = params[F"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
_snake_case : Union[str, Any] = (wi_a, wi_a)
else:
_snake_case : Optional[int] = params[F"""{prefix}/layers_{i}/mlp/wi/kernel"""]
_snake_case : List[str] = params[F"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
return params[F"""{prefix}/layers_{i}/{layer_name}/scale"""]
def A ( UpperCAmelCase , *, UpperCAmelCase , UpperCAmelCase ):
_snake_case : Any = traverse_util.flatten_dict(variables["target"] )
_snake_case : str = {"/".join(UpperCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_snake_case : Tuple = "encoder/layers_0/mlp/wi_0/kernel" in old
print("Split MLP:" , UpperCAmelCase )
_snake_case : List[Any] = collections.OrderedDict()
# Shared embeddings.
_snake_case : Tuple = old["token_embedder/embedding"]
# Encoder.
for i in range(UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
_snake_case : Optional[Any] = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "pre_attention_layer_norm" )
_snake_case , _snake_case , _snake_case , _snake_case : Union[str, Any] = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "attention" )
_snake_case : Optional[Any] = layer_norm
_snake_case : int = k.T
_snake_case : Optional[int] = o.T
_snake_case : Dict = q.T
_snake_case : Any = v.T
# Block i, layer 1 (MLP).
_snake_case : Any = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , "pre_mlp_layer_norm" )
_snake_case , _snake_case : Tuple = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , "encoder" , UpperCAmelCase )
_snake_case : Any = layer_norm
if split_mlp_wi:
_snake_case : Union[str, Any] = wi[0].T
_snake_case : List[str] = wi[1].T
else:
_snake_case : List[Any] = wi.T
_snake_case : Dict = wo.T
_snake_case : Dict = old[
"encoder/relpos_bias/rel_embedding"
].T
_snake_case : Dict = old["encoder/encoder_norm/scale"]
if not is_encoder_only:
# Decoder.
for i in range(UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
_snake_case : str = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_self_attention_layer_norm" )
_snake_case , _snake_case , _snake_case , _snake_case : Optional[Any] = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "self_attention" )
_snake_case : Optional[Any] = layer_norm
_snake_case : Dict = k.T
_snake_case : List[str] = o.T
_snake_case : List[Any] = q.T
_snake_case : List[Any] = v.T
# Block i, layer 1 (Cross Attention).
_snake_case : Tuple = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_cross_attention_layer_norm" )
_snake_case , _snake_case , _snake_case , _snake_case : Union[str, Any] = tax_attention_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "encoder_decoder_attention" )
_snake_case : Tuple = layer_norm
_snake_case : Union[str, Any] = k.T
_snake_case : str = o.T
_snake_case : List[str] = q.T
_snake_case : List[Any] = v.T
# Block i, layer 2 (MLP).
_snake_case : Dict = tax_layer_norm_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , "pre_mlp_layer_norm" )
_snake_case , _snake_case : Union[str, Any] = tax_mlp_lookup(UpperCAmelCase , UpperCAmelCase , "decoder" , UpperCAmelCase )
_snake_case : Optional[Any] = layer_norm
if split_mlp_wi:
_snake_case : str = wi[0].T
_snake_case : Union[str, Any] = wi[1].T
else:
_snake_case : Optional[Any] = wi.T
_snake_case : int = wo.T
_snake_case : Optional[int] = old["decoder/decoder_norm/scale"]
_snake_case : int = old[
"decoder/relpos_bias/rel_embedding"
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_snake_case : Tuple = old["decoder/logits_dense/kernel"].T
return new
def A ( UpperCAmelCase , UpperCAmelCase ):
_snake_case : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_snake_case : Tuple = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_snake_case : List[Any] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
_snake_case : Optional[int] = state_dict["shared.weight"]
return state_dict
def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
_snake_case : List[str] = checkpoints.load_tax_checkpoint(UpperCAmelCase )
_snake_case : Dict = convert_tax_to_pytorch(UpperCAmelCase , num_layers=config.num_layers , is_encoder_only=UpperCAmelCase )
_snake_case : Dict = make_state_dict(UpperCAmelCase , UpperCAmelCase )
model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
def A ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ):
_snake_case : str = TaConfig.from_json_file(UpperCAmelCase )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_snake_case : List[Any] = TaEncoderModel(UpperCAmelCase )
else:
_snake_case : List[str] = TaForConditionalGeneration(UpperCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCAmelCase )
print("Done" )
if __name__ == "__main__":
__lowerCAmelCase :Any = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
__lowerCAmelCase :Optional[int] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
) | 278 | 0 |
"""simple docstring"""
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
monkeypatch.setattr("""datasets.utils.deprecation_utils._emitted_deprecation_warnings""" , set() )
@pytest.fixture
def lowerCAmelCase ( __UpperCamelCase ):
'''simple docstring'''
class __lowercase :
def __init__( self : List[str] ,A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = metric_id
class __lowercase :
snake_case_ = [MetricMock(__lowerCamelCase ) for metric_id in ["""accuracy""", """mse""", """precision""", """codeparrot/apps_metric"""]]
def __lowercase ( self : List[Any] ):
'''simple docstring'''
return self._metrics
monkeypatch.setattr("""datasets.inspect.huggingface_hub""" , HfhMock() )
@pytest.mark.parametrize(
"""func, args""" , [(load_metric, ("""metrics/mse""",)), (list_metrics, ()), (inspect_metric, ("""metrics/mse""", """tmp_path"""))] )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
'''simple docstring'''
if "tmp_path" in args:
UpperCAmelCase__ : Any = tuple(arg if arg != """tmp_path""" else tmp_path for arg in args )
with pytest.warns(__UpperCamelCase , match="""https://huggingface.co/docs/evaluate""" ):
func(*__UpperCamelCase )
| 65 |
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def A_ ( lowercase , lowercase=False ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : Any = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''module.blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''module.blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''module.blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''module.blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
UpperCAmelCase_ : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def A_ ( lowercase , lowercase , lowercase=False ) -> Union[str, Any]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
UpperCAmelCase_ : str = """"""
else:
UpperCAmelCase_ : str = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCAmelCase_ : Any = state_dict.pop(f'''module.blocks.{i}.attn.qkv.weight''' )
UpperCAmelCase_ : List[Any] = state_dict.pop(f'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ : List[str] = in_proj_weight[
: config.hidden_size, :
]
UpperCAmelCase_ : List[str] = in_proj_bias[: config.hidden_size]
UpperCAmelCase_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCAmelCase_ : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCAmelCase_ : Any = in_proj_weight[
-config.hidden_size :, :
]
UpperCAmelCase_ : Optional[int] = in_proj_bias[-config.hidden_size :]
def A_ ( lowercase ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowercase , lowercase )
def A_ ( lowercase ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
for k in ignore_keys:
state_dict.pop(lowercase , lowercase )
def A_ ( lowercase , lowercase , lowercase ) -> Any:
"""simple docstring"""
UpperCAmelCase_ : List[str] = dct.pop(lowercase )
UpperCAmelCase_ : Dict = val
def A_ ( lowercase , lowercase ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : str = ViTMSNConfig()
UpperCAmelCase_ : List[str] = 1000
UpperCAmelCase_ : str = """datasets/huggingface/label-files"""
UpperCAmelCase_ : str = """imagenet-1k-id2label.json"""
UpperCAmelCase_ : Union[str, Any] = json.load(open(hf_hub_download(lowercase , lowercase ) , """r""" ) )
UpperCAmelCase_ : Optional[int] = {int(lowercase ): v for k, v in idalabel.items()}
UpperCAmelCase_ : Any = idalabel
UpperCAmelCase_ : Any = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
UpperCAmelCase_ : str = 384
UpperCAmelCase_ : List[Any] = 1536
UpperCAmelCase_ : Optional[Any] = 6
elif "l16" in checkpoint_url:
UpperCAmelCase_ : Any = 1024
UpperCAmelCase_ : Dict = 4096
UpperCAmelCase_ : Tuple = 24
UpperCAmelCase_ : List[Any] = 16
UpperCAmelCase_ : Optional[int] = 0.1
elif "b4" in checkpoint_url:
UpperCAmelCase_ : Union[str, Any] = 4
elif "l7" in checkpoint_url:
UpperCAmelCase_ : Optional[int] = 7
UpperCAmelCase_ : Union[str, Any] = 1024
UpperCAmelCase_ : Optional[int] = 4096
UpperCAmelCase_ : Union[str, Any] = 24
UpperCAmelCase_ : str = 16
UpperCAmelCase_ : List[Any] = 0.1
UpperCAmelCase_ : Optional[Any] = ViTMSNModel(lowercase )
UpperCAmelCase_ : str = torch.hub.load_state_dict_from_url(lowercase , map_location="""cpu""" )["""target_encoder"""]
UpperCAmelCase_ : Optional[Any] = ViTImageProcessor(size=config.image_size )
remove_projection_head(lowercase )
UpperCAmelCase_ : List[str] = create_rename_keys(lowercase , base_model=lowercase )
for src, dest in rename_keys:
rename_key(lowercase , lowercase , lowercase )
read_in_q_k_v(lowercase , lowercase , base_model=lowercase )
model.load_state_dict(lowercase )
model.eval()
UpperCAmelCase_ : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
UpperCAmelCase_ : Any = Image.open(requests.get(lowercase , stream=lowercase ).raw )
UpperCAmelCase_ : int = ViTImageProcessor(
size=config.image_size , image_mean=lowercase , image_std=lowercase )
UpperCAmelCase_ : Tuple = image_processor(images=lowercase , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
UpperCAmelCase_ : int = model(**lowercase )
UpperCAmelCase_ : str = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
UpperCAmelCase_ : Optional[int] = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
UpperCAmelCase_ : str = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
UpperCAmelCase_ : List[str] = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
UpperCAmelCase_ : Any = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
UpperCAmelCase_ : Dict = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , lowercase , atol=1E-4 )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(lowercase )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
lowercase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 470 | 0 |
from __future__ import annotations
from math import pow, sqrt
def A__( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance == 0:
return {"resistance": sqrt(pow(__lowerCAmelCase , 2 ) - pow(__lowerCAmelCase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(__lowerCAmelCase , 2 ) - pow(__lowerCAmelCase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(__lowerCAmelCase , 2 ) + pow(__lowerCAmelCase , 2 ) )}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
lowercase_ : Tuple = '''
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase_ : Optional[int] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase_ : str = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 652 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase = RobertaTokenizer
_UpperCAmelCase = RobertaTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = {"""cls_token""": """<s>"""}
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE_ : str = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase__ ) )
def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , **lowerCAmelCase__ ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def UpperCamelCase__ ( self , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = 'lower newer'
SCREAMING_SNAKE_CASE_ : Any = 'lower newer'
return input_text, output_text
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE_ : Optional[int] = 'lower newer'
SCREAMING_SNAKE_CASE_ : List[Any] = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowerCAmelCase__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowerCAmelCase__ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer_class.from_pretrained('roberta-base' )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode('sequence builders' , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode('multi-sequence build' , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.encode(
'sequence builders' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'Encode this sequence.'
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
tokenizer.add_special_tokens({'bos_token': '<s>'} )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE_ : Any = '<mask>'
tokenizer.add_special_tokens(
{'mask_token': AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'Encode <mask> sequence'
SCREAMING_SNAKE_CASE_ : List[Any] = 'Encode <mask>sequence'
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = encoded.index(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = encoded.index(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_ : str = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
lowerCAmelCase__ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : int = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowerCAmelCase__ )
self.assertEqual(post_processor_state['add_prefix_space'] , lowerCAmelCase__ )
self.assertEqual(post_processor_state['trim_offsets'] , lowerCAmelCase__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE_ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
SCREAMING_SNAKE_CASE_ : Tuple = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE_ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE_ : List[str] = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE_ : Any = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE_ : Any = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE_ : int = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE_ : int = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
SCREAMING_SNAKE_CASE_ : Dict = self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
| 101 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __A( __lowerCamelCase , __lowerCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 1
@register_to_config
def __init__(self , SCREAMING_SNAKE_CASE_ = 10_00 , SCREAMING_SNAKE_CASE_ = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(SCREAMING_SNAKE_CASE_ )
# standard deviation of the initial noise distribution
UpperCamelCase__ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
UpperCamelCase__ = 4
# running values
UpperCamelCase__ = []
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
UpperCamelCase__ = num_inference_steps
UpperCamelCase__ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
UpperCamelCase__ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
UpperCamelCase__ = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
UpperCamelCase__ = torch.sin(steps * math.pi / 2 ) ** 2
UpperCamelCase__ = (1.0 - self.betas**2) ** 0.5
UpperCamelCase__ = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
UpperCamelCase__ = timesteps.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = []
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = True , ):
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
UpperCamelCase__ = (self.timesteps == timestep).nonzero().item()
UpperCamelCase__ = timestep_index + 1
UpperCamelCase__ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(SCREAMING_SNAKE_CASE_ )
if len(self.ets ) == 1:
UpperCamelCase__ = self.ets[-1]
elif len(self.ets ) == 2:
UpperCamelCase__ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
UpperCamelCase__ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
UpperCamelCase__ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
UpperCamelCase__ = self._get_prev_sample(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return sample
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = self.alphas[timestep_index]
UpperCamelCase__ = self.betas[timestep_index]
UpperCamelCase__ = self.alphas[prev_timestep_index]
UpperCamelCase__ = self.betas[prev_timestep_index]
UpperCamelCase__ = (sample - sigma * ets) / max(SCREAMING_SNAKE_CASE_ , 1E-8 )
UpperCamelCase__ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__(self ):
return self.config.num_train_timesteps
| 513 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''andreasmadsen/efficient_mlm_m0.40''': (
'''https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json'''
),
}
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : List[Any] = "roberta-prelayernorm"
def __init__( self , lowercase_=5_0_2_6_5 , lowercase_=7_6_8 , lowercase_=1_2 , lowercase_=1_2 , lowercase_=3_0_7_2 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=5_1_2 , lowercase_=2 , lowercase_=0.0_2 , lowercase_=1E-12 , lowercase_=1 , lowercase_=0 , lowercase_=2 , lowercase_="absolute" , lowercase_=True , lowercase_=None , **lowercase_ , ) -> Optional[int]:
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_act
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = initializer_range
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = position_embedding_type
UpperCAmelCase = use_cache
UpperCAmelCase = classifier_dropout
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
@property
def a_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 183 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ ):
__SCREAMING_SNAKE_CASE : str = (DDPMScheduler,)
def a_ ( self , **lowercase_ ) -> Union[str, Any]:
UpperCAmelCase = {
'num_train_timesteps': 1_0_0_0,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**lowercase_ )
return config
def a_ ( self ) -> int:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def a_ ( self ) -> Optional[int]:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def a_ ( self ) -> Dict:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def a_ ( self ) -> Union[str, Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=lowercase_ )
def a_ ( self ) -> Optional[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def a_ ( self ) -> Tuple:
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , )
def a_ ( self ) -> Any:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def a_ ( self ) -> Union[str, Any]:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=lowercase_ )
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.0_2 ) ) < 1E-5
def a_ ( self ) -> str:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = len(lowercase_ )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
UpperCAmelCase = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase = pred_prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type='v_prediction' )
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = len(lowercase_ )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(lowercase_ ) ):
# 1. predict noise residual
UpperCAmelCase = model(lowercase_ , lowercase_ )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase = scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase = pred_prev_sample
UpperCAmelCase = torch.sum(torch.abs(lowercase_ ) )
UpperCAmelCase = torch.mean(torch.abs(lowercase_ ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=lowercase_ )
UpperCAmelCase = scheduler.timesteps
for i, timestep in enumerate(lowercase_ ):
if i == len(lowercase_ ) - 1:
UpperCAmelCase = -1
else:
UpperCAmelCase = timesteps[i + 1]
UpperCAmelCase = scheduler.previous_timestep(lowercase_ )
UpperCAmelCase = prev_t.item()
self.assertEqual(lowercase_ , lowercase_ )
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(lowercase_ , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=lowercase_ )
def a_ ( self ) -> List[Any]:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = [1_0_0, 8_7, 5_0, 1, 0]
UpperCAmelCase = len(lowercase_ )
with self.assertRaises(lowercase_ , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=lowercase_ , timesteps=lowercase_ )
def a_ ( self ) -> str:
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**lowercase_ )
UpperCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowercase_ , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=lowercase_ )
| 183 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowercase :
def __init__(self : str , snake_case : Union[str, Any] , snake_case : Union[str, Any]=13 , snake_case : Tuple=32 , snake_case : Optional[Any]=2 , snake_case : Union[str, Any]=3 , snake_case : Union[str, Any]=16 , snake_case : Optional[int]=[1, 2, 1] , snake_case : List[Any]=[2, 2, 4] , snake_case : Optional[Any]=2 , snake_case : Union[str, Any]=2.0 , snake_case : Any=True , snake_case : Tuple=0.0 , snake_case : Optional[Any]=0.0 , snake_case : List[Any]=0.1 , snake_case : Union[str, Any]="gelu" , snake_case : Optional[int]=False , snake_case : int=True , snake_case : Optional[int]=0.02 , snake_case : int=1e-5 , snake_case : Any=True , snake_case : List[str]=None , snake_case : Any=True , snake_case : str=10 , snake_case : Union[str, Any]=8 , ) -> List[str]:
_lowercase : Optional[Any] = parent
_lowercase : int = batch_size
_lowercase : Union[str, Any] = image_size
_lowercase : str = patch_size
_lowercase : int = num_channels
_lowercase : Dict = embed_dim
_lowercase : Any = depths
_lowercase : str = num_heads
_lowercase : int = window_size
_lowercase : List[str] = mlp_ratio
_lowercase : Union[str, Any] = qkv_bias
_lowercase : str = hidden_dropout_prob
_lowercase : List[Any] = attention_probs_dropout_prob
_lowercase : str = drop_path_rate
_lowercase : int = hidden_act
_lowercase : Dict = use_absolute_embeddings
_lowercase : str = patch_norm
_lowercase : Optional[int] = layer_norm_eps
_lowercase : int = initializer_range
_lowercase : List[Any] = is_training
_lowercase : str = scope
_lowercase : Union[str, Any] = use_labels
_lowercase : List[str] = type_sequence_label_size
_lowercase : List[str] = encoder_stride
def _a(self : List[str] ) -> Optional[int]:
_lowercase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowercase : Dict = None
if self.use_labels:
_lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : List[Any] = self.get_config()
return config, pixel_values, labels
def _a(self : Union[str, Any] ) -> Tuple:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _a(self : Dict , snake_case : List[Any] , snake_case : List[str] , snake_case : List[Any] ) -> Dict:
_lowercase : List[str] = SwinvaModel(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowercase : Tuple = model(_lowercase )
_lowercase : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_lowercase : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _a(self : Tuple , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : Any ) -> List[Any]:
_lowercase : List[Any] = SwinvaForMaskedImageModeling(config=_lowercase )
model.to(_lowercase )
model.eval()
_lowercase : Optional[int] = model(_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_lowercase : List[Any] = 1
_lowercase : List[str] = SwinvaForMaskedImageModeling(_lowercase )
model.to(_lowercase )
model.eval()
_lowercase : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowercase : Optional[int] = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _a(self : int , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Any ) -> Optional[Any]:
_lowercase : Tuple = self.type_sequence_label_size
_lowercase : str = SwinvaForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
_lowercase : int = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a(self : List[Any] ) -> Dict:
_lowercase : Any = self.prepare_config_and_inputs()
_lowercase , _lowercase , _lowercase : Dict = config_and_inputs
_lowercase : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( __snake_case , __snake_case , unittest.TestCase ):
_A = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_A = (
{"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
if is_torch_available()
else {}
)
_A = False
_A = False
_A = False
_A = False
def _a(self : Optional[int] ) -> Optional[int]:
_lowercase : str = SwinvaModelTester(self )
_lowercase : int = ConfigTester(self , config_class=_lowercase , embed_dim=37 )
def _a(self : str ) -> Any:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a(self : List[Any] ) -> int:
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def _a(self : int ) -> Optional[int]:
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def _a(self : Any ) -> Union[str, Any]:
pass
def _a(self : int ) -> Optional[Any]:
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Optional[int] = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowercase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def _a(self : Any ) -> Optional[int]:
_lowercase , _lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase : Tuple = model_class(_lowercase )
_lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase : List[Any] = [*signature.parameters.keys()]
_lowercase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _lowercase )
def _a(self : Optional[int] ) -> Dict:
_lowercase , _lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : int = True
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = True
_lowercase : Union[str, Any] = False
_lowercase : List[str] = True
_lowercase : List[str] = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_lowercase : List[str] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_lowercase : List[str] = outputs.attentions
_lowercase : List[Any] = len(self.model_tester.depths )
self.assertEqual(len(_lowercase ) , _lowercase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowercase : Dict = True
_lowercase : List[str] = config.window_size**2
_lowercase : List[Any] = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_lowercase : str = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_lowercase : List[Any] = outputs.attentions
self.assertEqual(len(_lowercase ) , _lowercase )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_lowercase : Tuple = len(_lowercase )
# Check attention is always last and order is fine
_lowercase : Optional[Any] = True
_lowercase : List[Any] = True
_lowercase : List[Any] = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_lowercase : str = model(**self._prepare_for_class(_lowercase , _lowercase ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
_lowercase : Optional[int] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_lowercase : Union[str, Any] = 2
self.assertEqual(out_len + added_hidden_states , len(_lowercase ) )
_lowercase : Optional[Any] = outputs.attentions
self.assertEqual(len(_lowercase ) , _lowercase )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _a(self : Dict , snake_case : List[str] , snake_case : str , snake_case : List[str] , snake_case : Tuple ) -> Any:
_lowercase : Tuple = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
_lowercase : Tuple = model(**self._prepare_for_class(_lowercase , _lowercase ) )
_lowercase : Union[str, Any] = outputs.hidden_states
_lowercase : str = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
# Swinv2 has a different seq_length
_lowercase : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowercase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_lowercase : Dict = outputs.reshaped_hidden_states
self.assertEqual(len(_lowercase ) , _lowercase )
_lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = reshaped_hidden_states[0].shape
_lowercase : Tuple = (
reshaped_hidden_states[0].view(_lowercase , _lowercase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _a(self : int ) -> List[str]:
_lowercase , _lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_lowercase : Union[str, Any] = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Dict = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , _lowercase )
def _a(self : Optional[int] ) -> Union[str, Any]:
_lowercase , _lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : List[Any] = 3
_lowercase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_lowercase : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_lowercase : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_lowercase : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_lowercase : Optional[int] = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowercase : Tuple = True
self.check_hidden_states_output(_lowercase , _lowercase , _lowercase , (padded_height, padded_width) )
def _a(self : Tuple ) -> List[str]:
_lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def _a(self : Any ) -> str:
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def _a(self : Union[str, Any] ) -> Optional[int]:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Tuple = SwinvaModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def _a(self : Union[str, Any] ) -> int:
_lowercase , _lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_lowercase : Union[str, Any] = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
_lowercase : str = model_class(config=_lowercase )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class __lowercase ( unittest.TestCase ):
@cached_property
def _a(self : List[Any] ) -> Optional[Any]:
return (
AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" )
if is_vision_available()
else None
)
@slow
def _a(self : List[Any] ) -> Optional[Any]:
_lowercase : Any = SwinvaForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256" ).to(
_lowercase )
_lowercase : str = self.default_image_processor
_lowercase : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_lowercase : Optional[Any] = image_processor(images=_lowercase , return_tensors="pt" ).to(_lowercase )
# forward pass
with torch.no_grad():
_lowercase : Tuple = model(**_lowercase )
# verify the logits
_lowercase : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _lowercase )
_lowercase : str = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 461 |
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ = 4_0_0_0_0_0_0 )-> int:
"""simple docstring"""
UpperCamelCase_ = [0, 1]
UpperCamelCase_ = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
UpperCamelCase_ = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(F'''{solution() = }''')
| 628 | 0 |
'''simple docstring'''
class lowercase :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ = {}
def a__ ( self : Union[str, Any] ) -> None:
'''simple docstring'''
print(self.vertex )
for i in self.vertex:
print(__lowerCamelCase , " -> " , " -> ".join([str(__lowerCamelCase ) for j in self.vertex[i]] ) )
def a__ ( self : Any , __lowerCamelCase : int , __lowerCamelCase : int ) -> None:
'''simple docstring'''
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__lowerCamelCase )
else:
# else make a new vertex
lowerCamelCase__ = [to_vertex]
def a__ ( self : List[str] ) -> None:
'''simple docstring'''
lowerCamelCase__ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__lowerCamelCase , __lowerCamelCase )
def a__ ( self : str , __lowerCamelCase : int , __lowerCamelCase : list ) -> None:
'''simple docstring'''
lowerCamelCase__ = True
print(__lowerCamelCase , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
__A : int = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 187 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowercase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = StableDiffusionDiffEditPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def a__ ( self : int ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCamelCase , )
lowerCamelCase__ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
lowerCamelCase__ = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_zero=__lowerCamelCase , )
torch.manual_seed(0 )
lowerCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase__ = CLIPTextModel(__lowerCamelCase )
lowerCamelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase__ = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def a__ ( self : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any=0 ) -> Tuple:
'''simple docstring'''
lowerCamelCase__ = floats_tensor((1, 16, 16) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("mps" ):
lowerCamelCase__ = torch.manual_seed(__lowerCamelCase )
else:
lowerCamelCase__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCamelCase__ = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a__ ( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[int]=0 ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__ = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" )
if str(__lowerCamelCase ).startswith("mps" ):
lowerCamelCase__ = torch.manual_seed(__lowerCamelCase )
else:
lowerCamelCase__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCamelCase__ = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a__ ( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str]=0 ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
lowerCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase__ = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert("RGB" )
if str(__lowerCamelCase ).startswith("mps" ):
lowerCamelCase__ = torch.manual_seed(__lowerCamelCase )
else:
lowerCamelCase__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
lowerCamelCase__ = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def a__ ( self : Tuple ) -> str:
'''simple docstring'''
if not hasattr(self.pipeline_class , "_optional_components" ):
return
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCamelCase__ = self.get_dummy_inputs(__lowerCamelCase )
lowerCamelCase__ = pipe(**__lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCamelCase )
lowerCamelCase__ = self.pipeline_class.from_pretrained(__lowerCamelCase )
pipe_loaded.to(__lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCamelCase , __lowerCamelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowerCamelCase__ = self.get_dummy_inputs(__lowerCamelCase )
lowerCamelCase__ = pipe_loaded(**__lowerCamelCase )[0]
lowerCamelCase__ = np.abs(output - output_loaded ).max()
self.assertLess(__lowerCamelCase , 1E-4 )
def a__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = "cpu"
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ = self.get_dummy_mask_inputs(__lowerCamelCase )
lowerCamelCase__ = pipe.generate_mask(**__lowerCamelCase )
lowerCamelCase__ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCamelCase__ = np.array([0] * 9 )
lowerCamelCase__ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def a__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = "cpu"
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ = self.get_dummy_inversion_inputs(__lowerCamelCase )
lowerCamelCase__ = pipe.invert(**__lowerCamelCase ).images
lowerCamelCase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase__ = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
lowerCamelCase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
def a__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def a__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = "cpu"
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = {"beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "beta_schedule": "scaled_linear"}
lowerCamelCase__ = DPMSolverMultistepScheduler(**__lowerCamelCase )
lowerCamelCase__ = DPMSolverMultistepInverseScheduler(**__lowerCamelCase )
lowerCamelCase__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ = self.get_dummy_inversion_inputs(__lowerCamelCase )
lowerCamelCase__ = pipe.invert(**__lowerCamelCase ).images
lowerCamelCase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase__ = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
lowerCamelCase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
@require_torch_gpu
@slow
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def a__ ( self : Optional[int] ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def a__ ( cls : int ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
lowerCamelCase__ = raw_image.convert("RGB" ).resize((768, 768) )
lowerCamelCase__ = raw_image
def a__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=__lowerCamelCase , torch_dtype=torch.floataa )
lowerCamelCase__ = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCamelCase__ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ = "a bowl of fruit"
lowerCamelCase__ = "a bowl of pears"
lowerCamelCase__ = pipe.generate_mask(
image=self.raw_image , source_prompt=__lowerCamelCase , target_prompt=__lowerCamelCase , generator=__lowerCamelCase , )
lowerCamelCase__ = pipe.invert(
prompt=__lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCamelCase ).latents
lowerCamelCase__ = pipe(
prompt=__lowerCamelCase , mask_image=__lowerCamelCase , image_latents=__lowerCamelCase , generator=__lowerCamelCase , negative_prompt=__lowerCamelCase , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
lowerCamelCase__ = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def a__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = torch.manual_seed(0 )
lowerCamelCase__ = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=__lowerCamelCase , torch_dtype=torch.floataa )
lowerCamelCase__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ = "a bowl of fruit"
lowerCamelCase__ = "a bowl of pears"
lowerCamelCase__ = pipe.generate_mask(
image=self.raw_image , source_prompt=__lowerCamelCase , target_prompt=__lowerCamelCase , generator=__lowerCamelCase , )
lowerCamelCase__ = pipe.invert(
prompt=__lowerCamelCase , image=self.raw_image , inpaint_strength=0.7 , generator=__lowerCamelCase , num_inference_steps=25 , ).latents
lowerCamelCase__ = pipe(
prompt=__lowerCamelCase , mask_image=__lowerCamelCase , image_latents=__lowerCamelCase , generator=__lowerCamelCase , negative_prompt=__lowerCamelCase , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
lowerCamelCase__ = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 187 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 137 |
import argparse
import os
import re
import packaging.version
_lowerCAmelCase = """examples/"""
_lowerCAmelCase = {
"""examples""": (re.compile(R"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(R"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(R"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), R"""\1version=\"VERSION\","""),
"""doc""": (re.compile(R"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
_lowerCAmelCase = {
"""init""": """src/transformers/__init__.py""",
"""setup""": """setup.py""",
}
_lowerCAmelCase = """README.md"""
def lowercase ( _a ,_a ,_a ) -> List[Any]:
with open(_a ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
UpperCAmelCase_: List[str] = f.read()
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = REPLACE_PATTERNS[pattern]
UpperCAmelCase_: List[Any] = replace.replace("VERSION" ,_a )
UpperCAmelCase_: str = re_pattern.sub(_a ,_a )
with open(_a ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.write(_a )
def lowercase ( _a ) -> List[str]:
for folder, directories, fnames in os.walk(_a ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(_a ,_a ) ,_a ,pattern="examples" )
def lowercase ( _a ,_a=False ) -> Optional[Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_a ,_a ,_a )
if not patch:
update_version_in_examples(_a )
def lowercase ( ) -> List[str]:
UpperCAmelCase_: int = "🤗 Transformers currently provides the following architectures"
UpperCAmelCase_: Dict = "1. Want to contribute a new model?"
with open(_a ,"r" ,encoding="utf-8" ,newline="\n" ) as f:
UpperCAmelCase_: Tuple = f.readlines()
# Find the start of the list.
UpperCAmelCase_: Optional[Any] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
UpperCAmelCase_: Tuple = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
UpperCAmelCase_: str = lines[index].replace(
"https://huggingface.co/docs/transformers/main/model_doc" ,"https://huggingface.co/docs/transformers/model_doc" ,)
index += 1
with open(_a ,"w" ,encoding="utf-8" ,newline="\n" ) as f:
f.writelines(_a )
def lowercase ( ) -> int:
with open(REPLACE_FILES["init"] ,"r" ) as f:
UpperCAmelCase_: List[str] = f.read()
UpperCAmelCase_: List[Any] = REPLACE_PATTERNS["init"][0].search(_a ).groups()[0]
return packaging.version.parse(_a )
def lowercase ( _a=False ) -> Optional[Any]:
UpperCAmelCase_: Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
UpperCAmelCase_: int = default_version.base_version
elif patch:
UpperCAmelCase_: Tuple = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
else:
UpperCAmelCase_: int = f"{default_version.major}.{default_version.minor + 1}.0"
# Now let's ask nicely if that's the right one.
UpperCAmelCase_: Dict = input(f"Which version are you releasing? [{default_version}]" )
if len(_a ) == 0:
UpperCAmelCase_: Union[str, Any] = default_version
print(f"Updating version to {version}." )
global_version_update(_a ,patch=_a )
if not patch:
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
def lowercase ( ) -> Union[str, Any]:
UpperCAmelCase_: Any = get_version()
UpperCAmelCase_: List[Any] = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
UpperCAmelCase_: int = current_version.base_version
# Check with the user we got that right.
UpperCAmelCase_: Any = input(f"Which version are we developing now? [{dev_version}]" )
if len(_a ) == 0:
UpperCAmelCase_: str = dev_version
print(f"Updating version to {version}." )
global_version_update(_a )
print("Cleaning main README, don't forget to run `make fix-copies`." )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
_lowerCAmelCase = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work() | 137 | 1 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_A : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class a ( _UpperCamelCase ,_UpperCamelCase ):
@register_to_config
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : bool , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : Optional[int] = None ):
super().__init__()
__lowerCamelCase: Any = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
__lowerCamelCase: int = torch.zeros(__a , __a )
else:
__lowerCamelCase: List[str] = None
__lowerCamelCase: str = torch.nn.Parameter(__a )
class a ( _UpperCamelCase ):
UpperCAmelCase__ : VQModel
UpperCAmelCase__ : CLIPTextModel
UpperCAmelCase__ : CLIPTokenizer
UpperCAmelCase__ : TransformeraDModel
UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings
UpperCAmelCase__ : VQDiffusionScheduler
def __init__( self : int , SCREAMING_SNAKE_CASE_ : VQModel , SCREAMING_SNAKE_CASE_ : CLIPTextModel , SCREAMING_SNAKE_CASE_ : CLIPTokenizer , SCREAMING_SNAKE_CASE_ : TransformeraDModel , SCREAMING_SNAKE_CASE_ : VQDiffusionScheduler , SCREAMING_SNAKE_CASE_ : LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=__a , transformer=__a , text_encoder=__a , tokenizer=__a , scheduler=__a , learned_classifier_free_sampling_embeddings=__a , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str ):
__lowerCamelCase: int = len(__a ) if isinstance(__a , __a ) else 1
# get prompt text embeddings
__lowerCamelCase: Dict = self.tokenizer(
__a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__lowerCamelCase: Optional[int] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__lowerCamelCase: Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
F' {self.tokenizer.model_max_length} tokens: {removed_text}' )
__lowerCamelCase: List[str] = text_input_ids[:, : self.tokenizer.model_max_length]
__lowerCamelCase: List[Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
__lowerCamelCase: int = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=__a )
# duplicate text embeddings for each generation per prompt
__lowerCamelCase: List[str] = prompt_embeds.repeat_interleave(__a , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
__lowerCamelCase: Any = self.learned_classifier_free_sampling_embeddings.embeddings
__lowerCamelCase: str = negative_prompt_embeds.unsqueeze(0 ).repeat(__a , 1 , 1 )
else:
__lowerCamelCase: Tuple = [""] * batch_size
__lowerCamelCase: Any = text_input_ids.shape[-1]
__lowerCamelCase: Optional[int] = self.tokenizer(
__a , padding="""max_length""" , max_length=__a , truncation=__a , return_tensors="""pt""" , )
__lowerCamelCase: Optional[int] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
__lowerCamelCase: Optional[int] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=__a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCamelCase: Tuple = negative_prompt_embeds.shape[1]
__lowerCamelCase: Dict = negative_prompt_embeds.repeat(1 , __a , 1 )
__lowerCamelCase: List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , __a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCamelCase: str = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, List[str]] , SCREAMING_SNAKE_CASE_ : int = 100 , SCREAMING_SNAKE_CASE_ : float = 5.0 , SCREAMING_SNAKE_CASE_ : float = 1.0 , SCREAMING_SNAKE_CASE_ : int = 1 , SCREAMING_SNAKE_CASE_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE_ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE_ : int = 1 , ):
if isinstance(__a , __a ):
__lowerCamelCase: Tuple = 1
elif isinstance(__a , __a ):
__lowerCamelCase: List[Any] = len(__a )
else:
raise ValueError(F'`prompt` has to be of type `str` or `list` but is {type(__a )}' )
__lowerCamelCase: Optional[Any] = batch_size * num_images_per_prompt
__lowerCamelCase: Optional[int] = guidance_scale > 1.0
__lowerCamelCase: int = self._encode_prompt(__a , __a , __a )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__a , __a ) or callback_steps <= 0)
):
raise ValueError(
F'`callback_steps` has to be a positive integer but is {callback_steps} of type'
F' {type(__a )}.' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase: Union[str, Any] = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase: Any = self.transformer.num_vector_embeds - 1
__lowerCamelCase: List[str] = torch.full(__a , __a ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"""Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"""
F' {self.transformer.num_vector_embeds - 1} (inclusive).' )
__lowerCamelCase: List[Any] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__a , device=self.device )
__lowerCamelCase: Dict = self.scheduler.timesteps.to(self.device )
__lowerCamelCase: Dict = latents
for i, t in enumerate(self.progress_bar(__a ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase: int = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase: str = self.transformer(__a , encoder_hidden_states=__a , timestep=__a ).sample
if do_classifier_free_guidance:
__lowerCamelCase: Dict = model_output.chunk(2 )
__lowerCamelCase: str = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(__a , dim=1 , keepdim=__a )
__lowerCamelCase: Any = self.truncate(__a , __a )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase: int = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase: Dict = self.scheduler.step(__a , timestep=__a , sample=__a , generator=__a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__a , __a , __a )
__lowerCamelCase: Union[str, Any] = self.vqvae.config.vq_embed_dim
__lowerCamelCase: Dict = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase: Tuple = self.vqvae.quantize.get_codebook_entry(__a , shape=__a )
__lowerCamelCase: int = self.vqvae.decode(__a , force_not_quantize=__a ).sample
__lowerCamelCase: Tuple = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase: Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase: Dict = self.numpy_to_pil(__a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : torch.FloatTensor , SCREAMING_SNAKE_CASE_ : float ):
__lowerCamelCase: str = torch.sort(__a , 1 , descending=__a )
__lowerCamelCase: Union[str, Any] = torch.exp(__a )
__lowerCamelCase: Any = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase: Optional[int] = torch.full_like(keep_mask[:, 0:1, :] , __a )
__lowerCamelCase: List[Any] = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase: List[str] = keep_mask[:, :-1, :]
__lowerCamelCase: Union[str, Any] = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase: List[Any] = log_p_x_0.clone()
__lowerCamelCase: Tuple = -torch.inf # -inf = log(0)
return rv
| 707 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a :
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int=13 , SCREAMING_SNAKE_CASE_ : str=[30, 30] , SCREAMING_SNAKE_CASE_ : str=2 , SCREAMING_SNAKE_CASE_ : List[Any]=3 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=32 , SCREAMING_SNAKE_CASE_ : Tuple=5 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=37 , SCREAMING_SNAKE_CASE_ : Dict="gelu" , SCREAMING_SNAKE_CASE_ : Any=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=10 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : int=8 , SCREAMING_SNAKE_CASE_ : Dict=10 , ):
__lowerCamelCase: Optional[int] = parent
__lowerCamelCase: Tuple = batch_size
__lowerCamelCase: Optional[Any] = image_size
__lowerCamelCase: str = patch_size
__lowerCamelCase: Dict = num_channels
__lowerCamelCase: Optional[int] = is_training
__lowerCamelCase: Union[str, Any] = use_labels
__lowerCamelCase: Union[str, Any] = hidden_size
__lowerCamelCase: Dict = num_hidden_layers
__lowerCamelCase: int = num_attention_heads
__lowerCamelCase: Optional[int] = intermediate_size
__lowerCamelCase: Union[str, Any] = hidden_act
__lowerCamelCase: List[str] = hidden_dropout_prob
__lowerCamelCase: Optional[int] = attention_probs_dropout_prob
__lowerCamelCase: List[str] = type_sequence_label_size
__lowerCamelCase: Dict = initializer_range
__lowerCamelCase: Optional[Any] = num_labels
__lowerCamelCase: Tuple = scope
__lowerCamelCase: Tuple = n_targets
__lowerCamelCase: Tuple = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
__lowerCamelCase: Any = (image_size[1] // patch_size) * (image_size[0] // patch_size)
__lowerCamelCase: str = num_patches + 1 + self.num_detection_tokens
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
__lowerCamelCase: List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
__lowerCamelCase: Optional[int] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
__lowerCamelCase: Optional[int] = []
for i in range(self.batch_size ):
__lowerCamelCase: Optional[int] = {}
__lowerCamelCase: Optional[int] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[str] = torch.rand(self.n_targets , 4 , device=SCREAMING_SNAKE_CASE_ )
labels.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict ):
__lowerCamelCase: List[Any] = YolosModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase: Optional[int] = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str ):
__lowerCamelCase: int = YolosForObjectDetection(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase: List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: str = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
__lowerCamelCase: Any = model(pixel_values=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
__lowerCamelCase: Union[str, Any] = self.prepare_config_and_inputs()
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase: Optional[int] = config_and_inputs
__lowerCamelCase: Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
UpperCAmelCase__ : Any = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
)
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : List[str] = False
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=False ):
__lowerCamelCase: Any = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__lowerCamelCase: str = []
for i in range(self.model_tester.batch_size ):
__lowerCamelCase: List[Any] = {}
__lowerCamelCase: List[Any] = torch.ones(
size=(self.model_tester.n_targets,) , device=SCREAMING_SNAKE_CASE_ , dtype=torch.long )
__lowerCamelCase: Optional[int] = torch.ones(
self.model_tester.n_targets , 4 , device=SCREAMING_SNAKE_CASE_ , dtype=torch.float )
labels.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[Any] = labels
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
__lowerCamelCase: Dict = YolosModelTester(self )
__lowerCamelCase: str = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
# YOLOS does not use inputs_embeds
pass
def SCREAMING_SNAKE_CASE__ ( self : int ):
__lowerCamelCase , __lowerCamelCase: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase: int = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase: Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
__lowerCamelCase , __lowerCamelCase: int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase: Any = [*signature.parameters.keys()]
__lowerCamelCase: Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
__lowerCamelCase: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
__lowerCamelCase , __lowerCamelCase: Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase: str = True
# in YOLOS, the seq_len is different
__lowerCamelCase: List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__lowerCamelCase: Dict = True
__lowerCamelCase: Union[str, Any] = False
__lowerCamelCase: Any = True
__lowerCamelCase: str = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__lowerCamelCase: Tuple = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase: str = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__lowerCamelCase: Tuple = True
__lowerCamelCase: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__lowerCamelCase: str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase: str = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__lowerCamelCase: List[str] = len(SCREAMING_SNAKE_CASE_ )
# Check attention is always last and order is fine
__lowerCamelCase: Any = True
__lowerCamelCase: Optional[Any] = True
__lowerCamelCase: List[str] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__lowerCamelCase: Optional[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase: Tuple = 1
self.assertEqual(out_len + added_hidden_states , len(SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase: Any = outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
__lowerCamelCase: List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__lowerCamelCase: str = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase: Any = outputs.hidden_states
__lowerCamelCase: int = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# YOLOS has a different seq_length
__lowerCamelCase: Optional[int] = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__lowerCamelCase , __lowerCamelCase: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase: Any = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase: int = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
__lowerCamelCase: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase: str = YolosModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __lowerCAmelCase ( ) -> Optional[Any]:
__lowerCamelCase: Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : str ):
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
__lowerCamelCase: int = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[Any] = self.default_image_processor
__lowerCamelCase: Optional[int] = prepare_img()
__lowerCamelCase: Optional[Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
__lowerCamelCase: str = model(inputs.pixel_values )
# verify outputs
__lowerCamelCase: Optional[int] = torch.Size((1, 100, 92) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: List[str] = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase: Optional[Any] = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
# verify postprocessing
__lowerCamelCase: Union[str, Any] = image_processor.post_process_object_detection(
SCREAMING_SNAKE_CASE_ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
__lowerCamelCase: Optional[Any] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase: Any = [75, 75, 17, 63, 17]
__lowerCamelCase: Optional[Any] = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , SCREAMING_SNAKE_CASE_ ) )
| 189 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCAmelCase ( __lowerCAmelCase , unittest.TestCase):
__lowercase : Dict = VideoToVideoSDPipeline
__lowercase : Dict = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''}) - {'''image''', '''width''', '''height'''}
__lowercase : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''}) - {'''image'''}
__lowercase : Optional[int] = PipelineTesterMixin.required_optional_params - {'''latents'''}
__lowercase : Any = False
# No `output_type`.
__lowercase : Union[str, Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
])
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
__snake_case = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=__SCREAMING_SNAKE_CASE , set_alpha_to_one=__SCREAMING_SNAKE_CASE , )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
__snake_case = CLIPTextModel(__SCREAMING_SNAKE_CASE )
__snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
__snake_case = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def lowerCAmelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=0 ) -> Union[str, Any]:
'''simple docstring'''
__snake_case = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
if str(__SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
__snake_case = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__snake_case = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__snake_case = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''video''': video,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
__snake_case = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = VideoToVideoSDPipeline(**__SCREAMING_SNAKE_CASE )
__snake_case = sd_pipe.to(__SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__snake_case = self.get_dummy_inputs(__SCREAMING_SNAKE_CASE )
__snake_case = '''np'''
__snake_case = sd_pipe(**__SCREAMING_SNAKE_CASE ).frames
__snake_case = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__snake_case = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__SCREAMING_SNAKE_CASE , expected_max_diff=5E-3 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def lowerCAmelCase ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def lowerCAmelCase ( self ) -> int:
'''simple docstring'''
pass
def lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class lowerCAmelCase ( unittest.TestCase):
def lowerCAmelCase ( self ) -> str:
'''simple docstring'''
__snake_case = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
__snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
__snake_case = torch.randn((1, 10, 3, 1024, 576) , generator=__SCREAMING_SNAKE_CASE )
__snake_case = video.to('''cuda''' )
__snake_case = '''Spiderman is surfing'''
__snake_case = pipe(__SCREAMING_SNAKE_CASE , video=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=3 , output_type='''pt''' ).frames
__snake_case = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 24 |
'''simple docstring'''
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
snake_case__ : int = multiprocessing.Manager()
snake_case__ : Any = manager.list()
snake_case__ : List[str] = multiprocessing.Process(target=__SCREAMING_SNAKE_CASE , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
snake_case__ : List[Any] = shutil.rmtree
snake_case__ : Tuple = os.rmdir
snake_case__ : Any = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
snake_case__ : Union[str, Any] = {}
with swallow_io():
with time_limit(__SCREAMING_SNAKE_CASE ):
exec(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(f"failed: {e}" )
# Needed for cleaning up.
snake_case__ : List[Any] = rmtree
snake_case__ : Optional[int] = rmdir
snake_case__ : Dict = chdir
@contextlib.contextmanager
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ) -> List[str]:
def signal_handler(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL , __SCREAMING_SNAKE_CASE )
signal.signal(signal.SIGALRM , __SCREAMING_SNAKE_CASE )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def UpperCamelCase__ ( ) -> List[str]:
snake_case__ : Dict = WriteOnlyStringIO()
with contextlib.redirect_stdout(__SCREAMING_SNAKE_CASE ):
with contextlib.redirect_stderr(__SCREAMING_SNAKE_CASE ):
with redirect_stdin(__SCREAMING_SNAKE_CASE ):
yield
@contextlib.contextmanager
def UpperCamelCase__ ( ) -> Optional[int]:
with tempfile.TemporaryDirectory() as dirname:
with chdir(__SCREAMING_SNAKE_CASE ):
yield dirname
class lowercase_ ( lowerCAmelCase_ ):
pass
class lowercase_ ( io.StringIO ):
def _lowerCAmelCase ( self : Optional[Any] , *__lowerCamelCase : int , **__lowerCamelCase : Optional[int] ):
raise OSError
def _lowerCAmelCase ( self : int , *__lowerCamelCase : List[Any] , **__lowerCamelCase : Tuple ):
raise OSError
def _lowerCAmelCase ( self : Any , *__lowerCamelCase : int , **__lowerCamelCase : Union[str, Any] ):
raise OSError
def _lowerCAmelCase ( self : Tuple , *__lowerCamelCase : List[str] , **__lowerCamelCase : Optional[Any] ):
return False
class lowercase_ ( contextlib._RedirectStream ): # type: ignore
A_ = "stdin"
@contextlib.contextmanager
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ) -> List[str]:
if root == ".":
yield
return
snake_case__ : str = os.getcwd()
os.chdir(__SCREAMING_SNAKE_CASE )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(__SCREAMING_SNAKE_CASE )
def UpperCamelCase__ ( __SCREAMING_SNAKE_CASE=None ) -> Any:
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
snake_case__ : List[Any] = None
snake_case__ : List[str] = None
import os
snake_case__ : List[Any] = '1'
snake_case__ : Union[str, Any] = None
snake_case__ : Tuple = None
snake_case__ : Dict = None
snake_case__ : Tuple = None
snake_case__ : List[str] = None
snake_case__ : Any = None
snake_case__ : Optional[Any] = None
snake_case__ : Union[str, Any] = None
snake_case__ : int = None
snake_case__ : Optional[int] = None
snake_case__ : Union[str, Any] = None
snake_case__ : List[Any] = None
snake_case__ : Optional[int] = None
snake_case__ : Union[str, Any] = None
snake_case__ : Tuple = None
snake_case__ : int = None
snake_case__ : Optional[Any] = None
snake_case__ : Optional[int] = None
snake_case__ : List[str] = None
snake_case__ : List[str] = None
snake_case__ : List[str] = None
snake_case__ : Dict = None
snake_case__ : Optional[Any] = None
snake_case__ : List[Any] = None
snake_case__ : Tuple = None
snake_case__ : Optional[int] = None
snake_case__ : int = None
import shutil
snake_case__ : List[str] = None
snake_case__ : List[Any] = None
snake_case__ : List[Any] = None
import subprocess
snake_case__ : int = None # type: ignore
snake_case__ : Dict = None
import sys
snake_case__ : Tuple = None
snake_case__ : Tuple = None
snake_case__ : List[str] = None
snake_case__ : Optional[int] = None
snake_case__ : List[Any] = None
| 270 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase__ = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 420 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
lowercase__ = {
'''susnato/ernie-m-base_pytorch''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json''',
'''susnato/ernie-m-large_pytorch''': '''https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json''',
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
snake_case = """ernie_m"""
snake_case = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , UpperCAmelCase_ = 25_00_02 , UpperCAmelCase_ = 7_68 , UpperCAmelCase_ = 12 , UpperCAmelCase_ = 12 , UpperCAmelCase_ = 30_72 , UpperCAmelCase_ = "gelu" , UpperCAmelCase_ = 0.1 , UpperCAmelCase_ = 0.1 , UpperCAmelCase_ = 5_14 , UpperCAmelCase_ = 0.02 , UpperCAmelCase_ = 1 , UpperCAmelCase_ = 1e-0_5 , UpperCAmelCase_=None , UpperCAmelCase_=False , UpperCAmelCase_=0.0 , **UpperCAmelCase_ , ):
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = classifier_dropout
snake_case_ = is_decoder
snake_case_ = act_dropout
| 420 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ):
'''simple docstring'''
_lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "deit.embeddings.cls_token"),
("dist_token", "deit.embeddings.distillation_token"),
("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "deit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "deit" from all keys that start with "deit"
_lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("deit" ) else pair for pair in rename_keys]
else:
# layernorm + classification heads
rename_keys.extend(
[
("norm.weight", "deit.layernorm.weight"),
("norm.bias", "deit.layernorm.bias"),
("head.weight", "cls_classifier.weight"),
("head.bias", "cls_classifier.bias"),
("head_dist.weight", "distillation_classifier.weight"),
("head_dist.bias", "distillation_classifier.bias"),
] )
return rename_keys
def __a(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
_lowerCAmelCase = ""
else:
_lowerCAmelCase = "deit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
_lowerCAmelCase = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase = in_proj_bias[: config.hidden_size]
_lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def __a(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = val
def __a():
'''simple docstring'''
_lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = DeiTConfig()
# all deit models have fine-tuned heads
_lowerCAmelCase = False
# dataset (fine-tuned on ImageNet 2012), patch_size and image_size
_lowerCAmelCase = 1000
_lowerCAmelCase = "huggingface/label-files"
_lowerCAmelCase = "imagenet-1k-id2label.json"
_lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
_lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
_lowerCAmelCase = int(deit_name[-6:-4] )
_lowerCAmelCase = int(deit_name[-3:] )
# size of the architecture
if deit_name[9:].startswith("tiny" ):
_lowerCAmelCase = 192
_lowerCAmelCase = 768
_lowerCAmelCase = 12
_lowerCAmelCase = 3
elif deit_name[9:].startswith("small" ):
_lowerCAmelCase = 384
_lowerCAmelCase = 1536
_lowerCAmelCase = 12
_lowerCAmelCase = 6
if deit_name[9:].startswith("base" ):
pass
elif deit_name[4:].startswith("large" ):
_lowerCAmelCase = 1024
_lowerCAmelCase = 4096
_lowerCAmelCase = 24
_lowerCAmelCase = 16
# load original model from timm
_lowerCAmelCase = timm.create_model(SCREAMING_SNAKE_CASE_ , pretrained=SCREAMING_SNAKE_CASE_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase = timm_model.state_dict()
_lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
_lowerCAmelCase = DeiTForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by DeiTImageProcessor
_lowerCAmelCase = int(
(256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
_lowerCAmelCase = DeiTImageProcessor(size=SCREAMING_SNAKE_CASE_ , crop_size=config.image_size )
_lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" )
_lowerCAmelCase = encoding["pixel_values"]
_lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = timm_model(SCREAMING_SNAKE_CASE_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1e-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 18 | # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowerCamelCase :
__a = 42
# setable values
__a = 42
__a = 42
__a = None
@classmethod
def UpperCamelCase_ ( cls , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> Optional[Any]:
return cls(common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase )
@dataclass
class _lowerCamelCase ( UpperCamelCase_ ):
__a = 42
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ ):
__a = [e.name for e in FlaxKarrasDiffusionSchedulers]
__a = 42
@property
def UpperCamelCase_ ( self ) -> List[Any]:
return True
@register_to_config
def __init__( self , lowerCAmelCase = 1000 , lowerCAmelCase = 0.0001 , lowerCAmelCase = 0.02 , lowerCAmelCase = "linear" , lowerCAmelCase = None , lowerCAmelCase = "fixed_small" , lowerCAmelCase = True , lowerCAmelCase = "epsilon" , lowerCAmelCase = jnp.floataa , ) -> Optional[int]:
SCREAMING_SNAKE_CASE__: Optional[int]= dtype
def UpperCamelCase_ ( self , lowerCAmelCase = None ) -> DDPMSchedulerState:
if common is None:
SCREAMING_SNAKE_CASE__: Optional[Any]= CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE__: Dict= jnp.array(1.0 , dtype=self.dtype )
SCREAMING_SNAKE_CASE__: int= jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=lowerCAmelCase , init_noise_sigma=lowerCAmelCase , timesteps=lowerCAmelCase , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ) -> jnp.ndarray:
return sample
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = () ) -> DDPMSchedulerState:
SCREAMING_SNAKE_CASE__: str= self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE__: str= (jnp.arange(0 , lowerCAmelCase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=lowerCAmelCase , timesteps=lowerCAmelCase , )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None , lowerCAmelCase=None ) -> List[str]:
SCREAMING_SNAKE_CASE__: Tuple= state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__: int= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
SCREAMING_SNAKE_CASE__: int= (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
SCREAMING_SNAKE_CASE__: Dict= jnp.clip(lowerCAmelCase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
SCREAMING_SNAKE_CASE__: str= jnp.log(jnp.clip(lowerCAmelCase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
SCREAMING_SNAKE_CASE__: Union[str, Any]= state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
SCREAMING_SNAKE_CASE__: Optional[Any]= jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
SCREAMING_SNAKE_CASE__: List[Any]= variance
SCREAMING_SNAKE_CASE__: Any= state.common.betas[t]
SCREAMING_SNAKE_CASE__: List[Any]= (predicted_variance + 1) / 2
SCREAMING_SNAKE_CASE__: Optional[Any]= frac * max_log + (1 - frac) * min_log
return variance
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
SCREAMING_SNAKE_CASE__: Union[str, Any]= timestep
if key is None:
SCREAMING_SNAKE_CASE__: Optional[Any]= jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= jnp.split(lowerCAmelCase , sample.shape[1] , axis=1 )
else:
SCREAMING_SNAKE_CASE__: Any= None
# 1. compute alphas, betas
SCREAMING_SNAKE_CASE__: List[Any]= state.common.alphas_cumprod[t]
SCREAMING_SNAKE_CASE__: Optional[int]= jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE__: Optional[int]= 1 - alpha_prod_t
SCREAMING_SNAKE_CASE__: str= 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE__: Dict= (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE__: str= model_output
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE__: Tuple= (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE__: Any= jnp.clip(lowerCAmelCase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE__: int= (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
SCREAMING_SNAKE_CASE__: Any= state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
SCREAMING_SNAKE_CASE__: Dict= pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
SCREAMING_SNAKE_CASE__: int= jax.random.split(lowerCAmelCase , num=1 )
SCREAMING_SNAKE_CASE__: str= jax.random.normal(lowerCAmelCase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(lowerCAmelCase , lowerCAmelCase , predicted_variance=lowerCAmelCase ) ** 0.5) * noise
SCREAMING_SNAKE_CASE__: Union[str, Any]= jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
SCREAMING_SNAKE_CASE__: Optional[int]= pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=lowerCAmelCase , state=lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
return add_noise_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> jnp.ndarray:
return get_velocity_common(state.common , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def __len__( self ) -> Tuple:
return self.config.num_train_timesteps
| 64 | 0 |
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def _lowerCamelCase ( self ) -> Union[str, Any]:
__A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
__A = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
__A = "xvjiarui/stable-diffusion-2-inpainting"
__A , __A = FlaxStableDiffusionInpaintPipeline.from_pretrained(lowercase__ , safety_checker=lowercase__ )
__A = "Face of a yellow cat, high resolution, sitting on a park bench"
__A = jax.random.PRNGKey(0 )
__A = 50
__A = jax.device_count()
__A = num_samples * [prompt]
__A = num_samples * [init_image]
__A = num_samples * [mask_image]
__A , __A , __A = pipeline.prepare_inputs(lowercase__ , lowercase__ , lowercase__ )
# shard inputs and rng
__A = replicate(lowercase__ )
__A = jax.random.split(lowercase__ , jax.device_count() )
__A = shard(lowercase__ )
__A = shard(lowercase__ )
__A = shard(lowercase__ )
__A = pipeline(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , jit=lowercase__ )
__A = output.images.reshape(lowercase__ , 512 , 512 , 3 )
__A = images[0, 253:256, 253:256, -1]
__A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__A = jnp.array(
[0.361_1307, 0.3764_9736, 0.375_7408, 0.3821_3953, 0.3929_5167, 0.384_1631, 0.4155_4978, 0.413_7475, 0.421_7084] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 205 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
snake_case_ : List[str] ={}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case_ : Any =['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
snake_case_ : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 205 | 1 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
a__ = logging.get_logger(__name__)
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self : Optional[int] , *lowerCAmelCase : List[Any] , **lowerCAmelCase : Optional[Any]) -> None:
"""simple docstring"""
warnings.warn(
"""The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use ImageGPTImageProcessor instead.""" , lowerCAmelCase , )
super().__init__(*lowerCAmelCase , **lowerCAmelCase)
| 477 |
from typing import Dict
from .base import GenericTensor, Pipeline
class snake_case ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : List[Any]=None , lowerCAmelCase : int=None , lowerCAmelCase : int=None , **lowerCAmelCase : Tuple) -> Optional[int]:
"""simple docstring"""
if tokenize_kwargs is None:
_snake_case : Tuple = {}
if truncation is not None:
if "truncation" in tokenize_kwargs:
raise ValueError(
"""truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""")
_snake_case : str = truncation
_snake_case : Dict = tokenize_kwargs
_snake_case : List[Any] = {}
if return_tensors is not None:
_snake_case : int = return_tensors
return preprocess_params, {}, postprocess_params
def UpperCamelCase_ ( self : Tuple , lowerCAmelCase : Tuple , **lowerCAmelCase : Optional[Any]) -> Dict[str, GenericTensor]:
"""simple docstring"""
_snake_case : int = self.framework
_snake_case : Optional[Any] = self.tokenizer(lowerCAmelCase , return_tensors=lowerCAmelCase , **lowerCAmelCase)
return model_inputs
def UpperCamelCase_ ( self : Any , lowerCAmelCase : List[str]) -> int:
"""simple docstring"""
_snake_case : List[str] = self.model(**lowerCAmelCase)
return model_outputs
def UpperCamelCase_ ( self : List[str] , lowerCAmelCase : Optional[int] , lowerCAmelCase : Any=False) -> Tuple:
"""simple docstring"""
if return_tensors:
return model_outputs[0]
if self.framework == "pt":
return model_outputs[0].tolist()
elif self.framework == "tf":
return model_outputs[0].numpy().tolist()
def __call__( self : Dict , *lowerCAmelCase : str , **lowerCAmelCase : Any) -> Dict:
"""simple docstring"""
return super().__call__(*lowerCAmelCase , **lowerCAmelCase)
| 477 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
A_ : Dict = logging.get_logger(__name__)
A_ : List[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
A_ : List[str] = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
A_ : Any = {
'junnyu/roformer_chinese_small': 1536,
'junnyu/roformer_chinese_base': 1536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
A_ : Union[str, Any] = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class _a (__magic_name__ ):
'''simple docstring'''
UpperCAmelCase__: Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase__: List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__: str = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase__: List[str] = RoFormerTokenizer
def __init__( self , A__=None , A__=None , A__=True , A__="[UNK]" , A__="[SEP]" , A__="[PAD]" , A__="[CLS]" , A__="[MASK]" , A__=True , A__=None , **A__ , ):
super().__init__(
A__ , tokenizer_file=A__ , do_lower_case=A__ , unk_token=A__ , sep_token=A__ , pad_token=A__ , cls_token=A__ , mask_token=A__ , tokenize_chinese_chars=A__ , strip_accents=A__ , **A__ , )
A__ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
pre_tok_state.get("""lowercase""" , A__ ) != do_lower_case
or pre_tok_state.get("""strip_accents""" , A__ ) != strip_accents
):
A__ : Dict = getattr(A__ , pre_tok_state.pop("""type""" ) )
A__ : int = do_lower_case
A__ : str = strip_accents
A__ : int = pre_tok_class(**A__ )
A__ : List[str] = do_lower_case
def __getstate__( self ):
A__ : Union[str, Any] = self.__dict__.copy()
A__ : Tuple = BertPreTokenizer()
return state
def __setstate__( self , A__ ):
A__ : List[Any] = d
A__ : str = self.__dict__["""_tokenizer"""].get_vocab()
A__ : Optional[int] = PreTokenizer.custom(JiebaPreTokenizer(A__ ) )
def __A ( self , A__ , A__=None ):
A__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , A__ , A__ = None ):
A__ : Dict = [self.sep_token_id]
A__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , A__ , A__ = None ):
A__ : str = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def __A ( self , A__ , A__=None , A__=None , A__=False , **A__ , ):
A__ : str = BertPreTokenizer()
return super().save_pretrained(A__ , A__ , A__ , A__ , **A__ )
| 705 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a (__magic_name__ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__: Any = TextToVideoSDPipeline
UpperCAmelCase__: Any = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__: Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
UpperCAmelCase__: Optional[int] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __A ( self ):
torch.manual_seed(0 )
A__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=32 , attention_head_dim=4 , )
A__ : Optional[int] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A__ , set_alpha_to_one=A__ , )
torch.manual_seed(0 )
A__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A__ : Union[str, Any] = CLIPTextModel(A__ )
A__ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A__ : Dict = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def __A ( self , A__ , A__=0 ):
if str(A__ ).startswith("""mps""" ):
A__ : Tuple = torch.manual_seed(A__ )
else:
A__ : List[str] = torch.Generator(device=A__ ).manual_seed(A__ )
A__ : List[str] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def __A ( self ):
A__ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A__ : Union[str, Any] = self.get_dummy_components()
A__ : Union[str, Any] = TextToVideoSDPipeline(**A__ )
A__ : int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
A__ : int = self.get_dummy_inputs(A__ )
A__ : int = """np"""
A__ : Any = sd_pipe(**A__ ).frames
A__ : Dict = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A__ : Optional[Any] = np.array([1_5_8.0, 1_6_0.0, 1_5_3.0, 1_2_5.0, 1_0_0.0, 1_2_1.0, 1_1_1.0, 9_3.0, 1_1_3.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A__ , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def __A ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A__ , expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def __A ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def __A ( self ):
pass
def __A ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
A__ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A__ : Tuple = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ : int = pipe.to("""cuda""" )
A__ : Optional[Any] = """Spiderman is surfing"""
A__ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[Any] = pipe(A__ , generator=A__ , num_inference_steps=25 , output_type="""pt""" ).frames
A__ : Dict = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def __A ( self ):
A__ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A__ : Optional[int] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A__ : List[str] = pipe.to("""cuda""" )
A__ : Dict = """Spiderman is surfing"""
A__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A__ : Optional[int] = pipe(A__ , generator=A__ , num_inference_steps=2 , output_type="""pt""" ).frames
A__ : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 64 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __UpperCAmelCase ( ):
snake_case_ = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
))
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=_snake_case , default=1 , help='Number of TPU cores to use (1 or 8).')
# positional
parser.add_argument(
'training_script' , type=_snake_case , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=_snake_case)
return parser.parse_args()
def __UpperCAmelCase ( ):
snake_case_ = parse_args()
# Import training_script as a module.
snake_case_ = Path(args.training_script)
sys.path.append(str(script_fpath.parent.resolve()))
snake_case_ = script_fpath.stem
snake_case_ = importlib.import_module(_snake_case)
# Patch sys.argv
snake_case_ = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores)]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 198 |
"""simple docstring"""
_UpperCamelCase = 8.31_44_62 # Unit - J mol-1 K-1
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 341 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __lowerCAmelCase ( UpperCamelCase ) -> Dict:
lowerCAmelCase__ : List[str] = os.path.join(args.tf_model_dir , '''parameters.json''' )
lowerCAmelCase__ : Dict = json.loads(open(UpperCamelCase ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith('''.pt''' ):
lowerCAmelCase__ : Union[str, Any] = args.output + '''.pt'''
lowerCAmelCase__ : Dict = OrderedDict()
with tf.device('''/CPU:0''' ):
lowerCAmelCase__ : Union[str, Any] = tf.train.load_checkpoint(args.tf_model_dir )
lowerCAmelCase__ : Optional[Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCAmelCase__ : Union[str, Any] = reader.get_tensor(UpperCamelCase ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowerCAmelCase__ : Optional[Any] = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowerCAmelCase__ : int = 8
lowerCAmelCase__ : Dict = '''model.sqout.%d.weight''' % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowerCAmelCase__ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase__ : Tuple = torch.tensor(UpperCamelCase )
elif key_name.startswith('''model/moe''' ):
lowerCAmelCase__ : int = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowerCAmelCase__ : Any = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowerCAmelCase__ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase__ : List[Any] = torch.tensor(UpperCamelCase )
elif key_name.endswith('''/softmlp/kernel''' ):
lowerCAmelCase__ : Optional[Any] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowerCAmelCase__ : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase__ : int = torch.tensor(UpperCamelCase )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowerCAmelCase__ : str = key_name[-9:-7]
for i in range(16 ):
lowerCAmelCase__ : List[Any] = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowerCAmelCase__ : str = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCAmelCase__ : Any = torch.tensor(UpperCamelCase )
elif key_name.startswith('''model/mlp''' ):
lowerCAmelCase__ : str = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowerCAmelCase__ : str = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowerCAmelCase__ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase__ : Any = torch.tensor(UpperCamelCase )
elif key_name.endswith('''/p1/bias''' ):
lowerCAmelCase__ : Dict = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowerCAmelCase__ : Optional[Any] = vnp.copy() # same because it is one dimensional
lowerCAmelCase__ : Optional[Any] = torch.tensor(UpperCamelCase )
elif key_name.endswith('''/p2/kernel''' ):
lowerCAmelCase__ : str = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowerCAmelCase__ : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase__ : Any = torch.tensor(UpperCamelCase )
elif key_name.endswith('''/p2/bias''' ):
lowerCAmelCase__ : Union[str, Any] = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowerCAmelCase__ : List[str] = vnp.copy() # same because it is one dimensional
lowerCAmelCase__ : Optional[Any] = torch.tensor(UpperCamelCase )
elif key_name.startswith('''model/ln''' ):
lowerCAmelCase__ : str = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCAmelCase__ : Tuple = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowerCAmelCase__ : Optional[int] = vnp.copy() # same because it is one dimensional
lowerCAmelCase__ : int = torch.tensor(UpperCamelCase )
elif key_name.endswith('''/g''' ):
lowerCAmelCase__ : Optional[int] = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowerCAmelCase__ : int = vnp.copy() # same because it is one dimensional
lowerCAmelCase__ : Dict = torch.tensor(UpperCamelCase )
elif key_name.startswith('''model/att''' ):
lowerCAmelCase__ : Tuple = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowerCAmelCase__ : Any = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCAmelCase__ : Union[str, Any] = state[:, 0, :, :]
lowerCAmelCase__ : Optional[int] = state[:, 1, :, :]
lowerCAmelCase__ : List[str] = state[:, 2, :, :]
lowerCAmelCase__ : Optional[Any] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase__ : List[str] = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase__ : str = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase__ : Any = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowerCAmelCase__ : List[str] = torch.tensor(UpperCamelCase )
lowerCAmelCase__ : Any = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowerCAmelCase__ : Optional[int] = torch.tensor(UpperCamelCase )
lowerCAmelCase__ : Any = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowerCAmelCase__ : List[str] = torch.tensor(UpperCamelCase )
elif key_name.endswith('''/o/kernel''' ):
lowerCAmelCase__ : Tuple = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowerCAmelCase__ : Optional[int] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase__ : Union[str, Any] = torch.tensor(UpperCamelCase )
elif key_name.startswith('''model/an''' ):
lowerCAmelCase__ : Optional[Any] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCAmelCase__ : List[Any] = '''model.blocks.%d.self_attn.norm.bias''' % player
lowerCAmelCase__ : Optional[Any] = vnp.copy() # same because it is one dimensional
lowerCAmelCase__ : Union[str, Any] = torch.tensor(UpperCamelCase )
elif key_name.endswith('''/g''' ):
lowerCAmelCase__ : Dict = '''model.blocks.%d.self_attn.norm.weight''' % player
lowerCAmelCase__ : str = vnp.copy() # same because it is one dimensional
lowerCAmelCase__ : List[str] = torch.tensor(UpperCamelCase )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowerCAmelCase__ : Optional[Any] = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowerCAmelCase__ : Any = '''model.%s.weight''' % nlayer
lowerCAmelCase__ : str = vnp.copy() # same in embedded
lowerCAmelCase__ : Dict = torch.tensor(UpperCamelCase )
if key_name.startswith('''model/wte''' ):
lowerCAmelCase__ : Any = '''lm_head.weight'''
lowerCAmelCase__ : List[Any] = vnp.copy() # same in embedded
lowerCAmelCase__ : List[Any] = torch.tensor(UpperCamelCase )
elif key_name.startswith('''model/wob''' ):
lowerCAmelCase__ : Optional[Any] = '''final_logits_bias'''
lowerCAmelCase__ : Union[str, Any] = vnp.copy() # same in embedded
lowerCAmelCase__ : Any = state.reshape((1, -1) )
lowerCAmelCase__ : int = torch.tensor(UpperCamelCase )
elif key_name == "model/dense/kernel":
lowerCAmelCase__ : str = '''model.last_project.weight'''
lowerCAmelCase__ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase__ : List[Any] = torch.tensor(UpperCamelCase )
elif key_name == "model/dense_1/bias":
lowerCAmelCase__ : List[str] = '''model.last_project.bias'''
lowerCAmelCase__ : Optional[int] = vnp.copy() # same because it is one dimensional
lowerCAmelCase__ : str = torch.tensor(UpperCamelCase )
torch.save(UpperCamelCase , args.output )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
lowerCAmelCase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 716 |
from __future__ import annotations
def __lowerCAmelCase ( UpperCamelCase ) -> list[int]: # This function is recursive
lowerCAmelCase__ : Tuple = len(UpperCamelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCAmelCase__ : Optional[int] = array[0]
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : List[str] = 1
lowerCAmelCase__ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Optional[int] = [element for element in array[i:] if element >= array[i]]
lowerCAmelCase__ : Any = longest_subsequence(UpperCamelCase )
if len(UpperCamelCase ) > len(UpperCamelCase ):
lowerCAmelCase__ : Optional[int] = temp_array
else:
i += 1
lowerCAmelCase__ : Optional[Any] = [element for element in array[1:] if element >= pivot]
lowerCAmelCase__ : List[Any] = [pivot, *longest_subsequence(UpperCamelCase )]
if len(UpperCamelCase ) > len(UpperCamelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 470 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase :
def __init__( self : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=13 , UpperCAmelCase : Tuple=32 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : Optional[int]=[10, 20, 30, 40] , UpperCAmelCase : Dict=[2, 2, 3, 2] , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=True , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : List[Any]=10 , UpperCAmelCase : Optional[int]=0.0_2 , UpperCAmelCase : Union[str, Any]=["stage2", "stage3", "stage4"] , UpperCAmelCase : Optional[int]=[2, 3, 4] , UpperCAmelCase : Optional[int]=None , ) -> int:
lowerCAmelCase :int = parent
lowerCAmelCase :Optional[Any] = batch_size
lowerCAmelCase :int = image_size
lowerCAmelCase :Any = num_channels
lowerCAmelCase :Tuple = num_stages
lowerCAmelCase :List[str] = hidden_sizes
lowerCAmelCase :int = depths
lowerCAmelCase :Any = is_training
lowerCAmelCase :Dict = use_labels
lowerCAmelCase :Dict = intermediate_size
lowerCAmelCase :Tuple = hidden_act
lowerCAmelCase :Optional[Any] = num_labels
lowerCAmelCase :Optional[Any] = initializer_range
lowerCAmelCase :str = out_features
lowerCAmelCase :List[Any] = out_indices
lowerCAmelCase :str = scope
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
lowerCAmelCase :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase :Any = None
if self.use_labels:
lowerCAmelCase :Any = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase :Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def UpperCAmelCase__ ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : str ) -> List[str]:
lowerCAmelCase :Optional[int] = ConvNextVaModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase :int = model(UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase__ ( self : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : int ) -> str:
lowerCAmelCase :Tuple = ConvNextVaForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase :List[Any] = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict ) -> Tuple:
lowerCAmelCase :Optional[Any] = ConvNextVaBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase :Tuple = model(UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCAmelCase :Any = None
lowerCAmelCase :Tuple = ConvNextVaBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowerCAmelCase :Optional[Any] = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def UpperCAmelCase__ ( self : str ) -> List[Any]:
lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :List[str] = config_and_inputs
lowerCAmelCase :Any = {'pixel_values': pixel_values}
return config, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
lowerCAmelCase :Tuple = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase :List[Any] = config_and_inputs
lowerCAmelCase :Optional[int] = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
lowercase_ : Tuple = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowercase_ : Tuple = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowercase_ : int = False
lowercase_ : Optional[int] = False
lowercase_ : Tuple = False
lowercase_ : Dict = False
lowercase_ : Union[str, Any] = False
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
lowerCAmelCase :str = ConvNextVaModelTester(self )
lowerCAmelCase :List[Any] = ConfigTester(self , config_class=UpperCAmelCase , has_text_modality=UpperCAmelCase , hidden_size=37 )
def UpperCAmelCase__ ( self : str ) -> List[str]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def UpperCAmelCase__ ( self : List[str] ) -> Tuple:
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
pass
def UpperCAmelCase__ ( self : Any ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase , lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase :Any = True
if model_class.__name__ in [
*get_values(UpperCAmelCase ),
*get_values(UpperCAmelCase ),
]:
continue
lowerCAmelCase :Tuple = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.train()
lowerCAmelCase :List[str] = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
lowerCAmelCase :Dict = model(**UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase__ ( self : Any ) -> Dict:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
lowerCAmelCase , lowerCAmelCase :Dict = self.model_tester.prepare_config_and_inputs_with_labels()
lowerCAmelCase :Tuple = False
lowerCAmelCase :Tuple = True
if (
model_class.__name__
in [*get_values(UpperCAmelCase ), *get_values(UpperCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCAmelCase :str = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
lowerCAmelCase :Any = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
lowerCAmelCase :Optional[int] = model(**UpperCAmelCase ).loss
loss.backward()
def UpperCAmelCase__ ( self : str ) -> Tuple:
lowerCAmelCase , lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase :Optional[Any] = model_class(UpperCAmelCase )
lowerCAmelCase :int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase :Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase :List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def UpperCAmelCase__ ( self : Any ) -> Optional[Any]:
def check_hidden_states_output(UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Dict ):
lowerCAmelCase :List[Any] = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase :Optional[int] = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowerCAmelCase :Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase :Any = self.model_tester.num_stages
self.assertEqual(len(UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCAmelCase , lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase :Optional[Any] = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase :Any = True
check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self : int ) -> Any:
lowerCAmelCase :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@slow
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase :Optional[int] = ConvNextVaModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def UpperCAmelCase ( ):
'''simple docstring'''
lowerCAmelCase :str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Tuple ) -> int:
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
lowerCAmelCase :Optional[Any] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(UpperCAmelCase )
lowerCAmelCase :List[Any] = self.default_image_processor
lowerCAmelCase :Optional[int] = prepare_img()
lowerCAmelCase :Tuple = preprocessor(images=UpperCAmelCase , return_tensors='pt' ).to(UpperCAmelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase :Tuple = model(**UpperCAmelCase )
# verify the logits
lowerCAmelCase :Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase )
lowerCAmelCase :int = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase , atol=1e-4 ) ) | 553 |
"""simple docstring"""
from math import sqrt
def UpperCAmelCase ( a__ = 1_00_00_00 ):
'''simple docstring'''
lowerCAmelCase :int = 0
lowerCAmelCase :int = 0
lowerCAmelCase :int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(a__ , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(F"""{solution() = }""") | 553 | 1 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _UpperCamelCase ( lowercase__ ):
UpperCAmelCase_ = """Speech2TextFeatureExtractor"""
UpperCAmelCase_ = """Speech2TextTokenizer"""
def __init__( self :Optional[int] , lowerCamelCase :Tuple , lowerCamelCase :Optional[int] ) -> Optional[int]:
super().__init__(__lowerCamelCase , __lowerCamelCase )
UpperCAmelCase__ = self.feature_extractor
UpperCAmelCase__ = False
def __call__( self :Optional[int] , *lowerCamelCase :List[Any] , **lowerCamelCase :str ) -> Optional[Any]:
if self._in_target_context_manager:
return self.current_processor(*__lowerCamelCase , **__lowerCamelCase )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
UpperCAmelCase__ = kwargs.pop("raw_speech" )
else:
UpperCAmelCase__ = kwargs.pop("audio" , __lowerCamelCase )
UpperCAmelCase__ = kwargs.pop("sampling_rate" , __lowerCamelCase )
UpperCAmelCase__ = kwargs.pop("text" , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
UpperCAmelCase__ = args[0]
UpperCAmelCase__ = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
UpperCAmelCase__ = self.feature_extractor(__lowerCamelCase , *__lowerCamelCase , sampling_rate=__lowerCamelCase , **__lowerCamelCase )
if text is not None:
UpperCAmelCase__ = self.tokenizer(__lowerCamelCase , **__lowerCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
UpperCAmelCase__ = encodings["input_ids"]
return inputs
def UpperCAmelCase_ ( self :Optional[int] , *lowerCamelCase :str , **lowerCamelCase :Optional[Any] ) -> Tuple:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def UpperCAmelCase_ ( self :Union[str, Any] , *lowerCamelCase :int , **lowerCamelCase :List[Any] ) -> List[str]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@contextmanager
def UpperCAmelCase_ ( self :List[Any] ) -> Optional[Any]:
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
UpperCAmelCase__ = True
UpperCAmelCase__ = self.tokenizer
yield
UpperCAmelCase__ = self.feature_extractor
UpperCAmelCase__ = False
| 705 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCAmelCase : Tuple = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class _UpperCamelCase ( lowerCAmelCase ):
UpperCAmelCase_ = """trajectory_transformer"""
UpperCAmelCase_ = ["""past_key_values"""]
UpperCAmelCase_ = {
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self :Union[str, Any] , lowerCamelCase :Optional[int]=100 , lowerCamelCase :Optional[int]=5 , lowerCamelCase :Optional[Any]=1 , lowerCamelCase :Any=1 , lowerCamelCase :int=249 , lowerCamelCase :Optional[Any]=6 , lowerCamelCase :Optional[Any]=17 , lowerCamelCase :Optional[int]=25 , lowerCamelCase :Union[str, Any]=4 , lowerCamelCase :Union[str, Any]=4 , lowerCamelCase :Optional[int]=128 , lowerCamelCase :List[Any]=0.1 , lowerCamelCase :List[str]=0.1 , lowerCamelCase :Tuple=0.1 , lowerCamelCase :Dict=0.00_06 , lowerCamelCase :Dict=512 , lowerCamelCase :Optional[Any]=0.02 , lowerCamelCase :Any=1e-12 , lowerCamelCase :Optional[Any]=1 , lowerCamelCase :List[Any]=True , lowerCamelCase :int=1 , lowerCamelCase :Dict=5_0256 , lowerCamelCase :Union[str, Any]=5_0256 , **lowerCamelCase :int , ) -> Optional[Any]:
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = action_weight
UpperCAmelCase__ = reward_weight
UpperCAmelCase__ = value_weight
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = block_size
UpperCAmelCase__ = action_dim
UpperCAmelCase__ = observation_dim
UpperCAmelCase__ = transition_dim
UpperCAmelCase__ = learning_rate
UpperCAmelCase__ = n_layer
UpperCAmelCase__ = n_head
UpperCAmelCase__ = n_embd
UpperCAmelCase__ = embd_pdrop
UpperCAmelCase__ = attn_pdrop
UpperCAmelCase__ = resid_pdrop
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = kaiming_initializer_range
UpperCAmelCase__ = use_cache
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
| 364 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_lowerCAmelCase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
_lowerCAmelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
_lowerCAmelCase = {"""unk_token""": """<unk>"""}
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase ) )
_lowerCAmelCase = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.4814_5466, 0.457_8275, 0.4082_1073],
"""image_std""": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
_lowerCAmelCase = os.path.join(self.tmpdirname , lowerCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def A__ (self , **lowerCamelCase ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def A__ (self , **lowerCamelCase ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def A__ (self , **lowerCamelCase ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def A__ (self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_lowerCAmelCase = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = self.get_rust_tokenizer()
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCAmelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
_lowerCAmelCase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCAmelCase = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
_lowerCAmelCase = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = image_processor(lowerCamelCase , return_tensors="""np""" )
_lowerCAmelCase = processor(images=lowerCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
_lowerCAmelCase = """lower newer"""
_lowerCAmelCase = processor(text=lowerCamelCase )
_lowerCAmelCase = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
_lowerCAmelCase = """lower newer"""
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = self.prepare_image_inputs()
_lowerCAmelCase = processor(images=lowerCamelCase , visual_prompt=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def A__ (self ):
'''simple docstring'''
_lowerCAmelCase = self.get_image_processor()
_lowerCAmelCase = self.get_tokenizer()
_lowerCAmelCase = CLIPSegProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
_lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase = processor.batch_decode(lowerCamelCase )
_lowerCAmelCase = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase ) | 156 |
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
SCREAMING_SNAKE_CASE : Optional[Any] = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
SCREAMING_SNAKE_CASE : Optional[int] = {
'''Salesforce/codegen-350M-mono''': 2_0_4_8,
}
class __lowerCamelCase ( __lowercase ):
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ['input_ids', 'attention_mask']
__UpperCamelCase = CodeGenTokenizer
def __init__(self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase="<|endoftext|>" , lowerCamelCase=False , **lowerCamelCase , ):
'''simple docstring'''
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , add_prefix_space=lowerCamelCase , **lowerCamelCase , )
if kwargs.pop("""add_bos_token""" , lowerCamelCase ):
_lowerCAmelCase = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2's fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"""
f"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"""
f"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"""
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
_lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , lowerCamelCase ) != add_prefix_space:
_lowerCAmelCase = getattr(lowerCamelCase , pre_tok_state.pop("""type""" ) )
_lowerCAmelCase = add_prefix_space
_lowerCAmelCase = pre_tok_class(**lowerCamelCase )
_lowerCAmelCase = add_prefix_space
def A__ (self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = kwargs.get("""is_split_into_words""" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def A__ (self , *lowerCamelCase , **lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = kwargs.get("""is_split_into_words""" , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = None ):
'''simple docstring'''
_lowerCAmelCase = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def A__ (self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = None , **lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase = super().decode(
token_ids=lowerCamelCase , skip_special_tokens=lowerCamelCase , clean_up_tokenization_spaces=lowerCamelCase , **lowerCamelCase , )
if truncate_before_pattern is not None and len(lowerCamelCase ) > 0:
_lowerCAmelCase = self.truncate(lowerCamelCase , lowerCamelCase )
return decoded_text
def A__ (self , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
def find_re(lowerCamelCase , lowerCamelCase , lowerCamelCase ):
_lowerCAmelCase = pattern.search(lowerCamelCase , lowerCamelCase )
return m.start() if m else -1
_lowerCAmelCase = [re.compile(lowerCamelCase , re.MULTILINE ) for pattern in truncate_before_pattern]
_lowerCAmelCase = list(re.finditer("""^print""" , lowerCamelCase , re.MULTILINE ) )
if len(lowerCamelCase ) > 1:
_lowerCAmelCase = completion[: prints[1].start()]
_lowerCAmelCase = list(re.finditer("""^def""" , lowerCamelCase , re.MULTILINE ) )
if len(lowerCamelCase ) > 1:
_lowerCAmelCase = completion[: defs[1].start()]
_lowerCAmelCase = 0
_lowerCAmelCase = [
pos for pos in [find_re(lowerCamelCase , lowerCamelCase , lowerCamelCase ) for terminal in terminals] if pos != -1
]
if len(lowerCamelCase ) > 0:
return completion[: min(lowerCamelCase )]
else:
return completion | 156 | 1 |
from math import factorial
__lowerCamelCase : dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise TypeError("Parameter number must be int" )
if number < 0:
raise ValueError("Parameter number must be greater than or equal to 0" )
# Converts number in string to iterate on its digits and adds its factorial.
return sum(DIGIT_FACTORIAL[digit] for digit in str(lowerCAmelCase ) )
def _snake_case ( lowerCAmelCase : int = 6_0 , lowerCAmelCase : int = 1_0_0_0_0_0_0 ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ) or not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise TypeError("Parameters chain_length and number_limit must be int" )
if chain_length <= 0 or number_limit <= 0:
raise ValueError(
"Parameters chain_length and number_limit must be greater than 0" )
# the counter for the chains with the exact desired length
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
# the cached sizes of the previous chains
SCREAMING_SNAKE_CASE_ : dict[int, int] = {}
for start_chain_element in range(1 , lowerCAmelCase ):
# The temporary set will contain the elements of the chain
SCREAMING_SNAKE_CASE_ : Optional[int] = set()
SCREAMING_SNAKE_CASE_ : int = 0
# Stop computing the chain when you find a cached size, a repeating item or the
# length is greater then the desired one.
SCREAMING_SNAKE_CASE_ : List[Any] = start_chain_element
while (
chain_element not in chain_sets_lengths
and chain_element not in chain_set
and chain_set_length <= chain_length
):
chain_set.add(lowerCAmelCase )
chain_set_length += 1
SCREAMING_SNAKE_CASE_ : str = digit_factorial_sum(lowerCAmelCase )
if chain_element in chain_sets_lengths:
chain_set_length += chain_sets_lengths[chain_element]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = chain_set_length
# If chain contains the exact amount of elements increase the counter
if chain_set_length == chain_length:
chains_counter += 1
return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
| 316 | from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class a__ :
A = 42 # [batch_size x 3]
A = 42 # [batch_size x 3]
A = 42 # [batch_size x 3]
A = 42 # [batch_size x 3]
A = 42
A = 42
A = 42
A = 42
A = 42
def __UpperCamelCase ( self : int ):
"""simple docstring"""
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height],dtype=np.floataa ) )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov],dtype=np.floataa ) )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.arange(self.height * self.width )
SCREAMING_SNAKE_CASE_ : str = torch.stack(
[
pixel_indices % self.width,
torch.div(_A,self.width,rounding_mode="trunc" ),
],axis=1,)
return coords
@property
def __UpperCamelCase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ : List[str] = self.shape
SCREAMING_SNAKE_CASE_ : Any = int(np.prod(_A ) )
SCREAMING_SNAKE_CASE_ : str = self.get_image_coords()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.broadcast_to(coords.unsqueeze(0 ),[batch_size * inner_batch_size, *coords.shape] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_camera_rays(_A )
SCREAMING_SNAKE_CASE_ : Any = rays.view(_A,inner_batch_size * self.height * self.width,2,3 )
return rays
def __UpperCamelCase ( self : Tuple,_A : torch.Tensor ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
SCREAMING_SNAKE_CASE_ : Tuple = coords.view(_A,-1,2 )
SCREAMING_SNAKE_CASE_ : List[str] = self.resolution()
SCREAMING_SNAKE_CASE_ : List[str] = self.fov()
SCREAMING_SNAKE_CASE_ : Optional[Any] = (flat.float() / (res - 1)) * 2 - 1
SCREAMING_SNAKE_CASE_ : Optional[int] = fracs * torch.tan(fov / 2 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = fracs.view(_A,-1,2 )
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
self.z.view(_A,1,3 )
+ self.x.view(_A,1,3 ) * fracs[:, :, :1]
+ self.y.view(_A,1,3 ) * fracs[:, :, 1:]
)
SCREAMING_SNAKE_CASE_ : Any = directions / directions.norm(dim=-1,keepdim=_A )
SCREAMING_SNAKE_CASE_ : Tuple = torch.stack(
[
torch.broadcast_to(self.origin.view(_A,1,3 ),[batch_size, directions.shape[1], 3] ),
directions,
],dim=2,)
return rays.view(_A,*_A,2,3 )
def __UpperCamelCase ( self : Optional[int],_A : int,_A : int ):
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin,x=self.x,y=self.y,z=self.z,width=_A,height=_A,x_fov=self.x_fov,y_fov=self.y_fov,)
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : int = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
SCREAMING_SNAKE_CASE_ : Dict = np.array([np.sin(lowerCAmelCase ), np.cos(lowerCAmelCase ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
SCREAMING_SNAKE_CASE_ : Dict = -z * 4
SCREAMING_SNAKE_CASE_ : str = np.array([np.cos(lowerCAmelCase ), -np.sin(lowerCAmelCase ), 0.0] )
SCREAMING_SNAKE_CASE_ : int = np.cross(lowerCAmelCase , lowerCAmelCase )
origins.append(lowerCAmelCase )
xs.append(lowerCAmelCase )
ys.append(lowerCAmelCase )
zs.append(lowerCAmelCase )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , x=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , y=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , z=torch.from_numpy(np.stack(lowerCAmelCase , axis=0 ) ).float() , width=lowerCAmelCase , height=lowerCAmelCase , x_fov=0.7 , y_fov=0.7 , shape=(1, len(lowerCAmelCase )) , )
| 316 | 1 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
SCREAMING_SNAKE_CASE: Any = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def _a ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , )-> Tuple:
output_path.parent.mkdir(parents=lowerCAmelCase , exist_ok=lowerCAmelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
lowerCAmelCase , lowerCAmelCase , f=output_path.as_posix() , input_names=lowerCAmelCase , output_names=lowerCAmelCase , dynamic_axes=lowerCAmelCase , do_constant_folding=lowerCAmelCase , use_external_data_format=lowerCAmelCase , enable_onnx_checker=lowerCAmelCase , opset_version=lowerCAmelCase , )
else:
export(
lowerCAmelCase , lowerCAmelCase , f=output_path.as_posix() , input_names=lowerCAmelCase , output_names=lowerCAmelCase , dynamic_axes=lowerCAmelCase , do_constant_folding=lowerCAmelCase , opset_version=lowerCAmelCase , )
@torch.no_grad()
def _a ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False )-> Any:
SCREAMING_SNAKE_CASE_ = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
SCREAMING_SNAKE_CASE_ = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
SCREAMING_SNAKE_CASE_ = 'cpu'
SCREAMING_SNAKE_CASE_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase , torch_dtype=lowerCAmelCase ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = Path(lowerCAmelCase )
# TEXT ENCODER
SCREAMING_SNAKE_CASE_ = pipeline.text_encoder.config.max_position_embeddings
SCREAMING_SNAKE_CASE_ = pipeline.text_encoder.config.hidden_size
SCREAMING_SNAKE_CASE_ = pipeline.tokenizer(
'A sample prompt' , padding='max_length' , max_length=pipeline.tokenizer.model_max_length , truncation=lowerCAmelCase , return_tensors='pt' , )
onnx_export(
pipeline.text_encoder , model_args=(text_input.input_ids.to(device=lowerCAmelCase , dtype=torch.intaa )) , output_path=output_path / 'text_encoder' / 'model.onnx' , ordered_input_names=['input_ids'] , output_names=['last_hidden_state', 'pooler_output'] , dynamic_axes={
'input_ids': {0: 'batch', 1: 'sequence'},
} , opset=lowerCAmelCase , )
del pipeline.text_encoder
# UNET
SCREAMING_SNAKE_CASE_ = pipeline.unet.config.in_channels
SCREAMING_SNAKE_CASE_ = pipeline.unet.config.sample_size
SCREAMING_SNAKE_CASE_ = output_path / 'unet' / 'model.onnx'
onnx_export(
pipeline.unet , model_args=(
torch.randn(2 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ),
torch.randn(2 ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ),
torch.randn(2 , lowerCAmelCase , lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ),
False,
) , output_path=lowerCAmelCase , ordered_input_names=['sample', 'timestep', 'encoder_hidden_states', 'return_dict'] , output_names=['out_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'timestep': {0: 'batch'},
'encoder_hidden_states': {0: 'batch', 1: 'sequence'},
} , opset=lowerCAmelCase , use_external_data_format=lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = str(unet_path.absolute().as_posix() )
SCREAMING_SNAKE_CASE_ = os.path.dirname(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = onnx.load(lowerCAmelCase )
# clean up existing tensor files
shutil.rmtree(lowerCAmelCase )
os.mkdir(lowerCAmelCase )
# collate external tensor files into one
onnx.save_model(
lowerCAmelCase , lowerCAmelCase , save_as_external_data=lowerCAmelCase , all_tensors_to_one_file=lowerCAmelCase , location='weights.pb' , convert_attribute=lowerCAmelCase , )
del pipeline.unet
# VAE ENCODER
SCREAMING_SNAKE_CASE_ = pipeline.vae
SCREAMING_SNAKE_CASE_ = vae_encoder.config.in_channels
SCREAMING_SNAKE_CASE_ = vae_encoder.config.sample_size
# need to get the raw tensor output (sample) from the encoder
SCREAMING_SNAKE_CASE_ = lambda lowerCAmelCase , lowerCAmelCase : vae_encoder.encode(lowerCAmelCase , lowerCAmelCase )[0].sample()
onnx_export(
lowerCAmelCase , model_args=(
torch.randn(1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ),
False,
) , output_path=output_path / 'vae_encoder' / 'model.onnx' , ordered_input_names=['sample', 'return_dict'] , output_names=['latent_sample'] , dynamic_axes={
'sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=lowerCAmelCase , )
# VAE DECODER
SCREAMING_SNAKE_CASE_ = pipeline.vae
SCREAMING_SNAKE_CASE_ = vae_decoder.config.latent_channels
SCREAMING_SNAKE_CASE_ = vae_decoder.config.out_channels
# forward only through the decoder part
SCREAMING_SNAKE_CASE_ = vae_encoder.decode
onnx_export(
lowerCAmelCase , model_args=(
torch.randn(1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=lowerCAmelCase , )
del pipeline.vae
# SAFETY CHECKER
if pipeline.safety_checker is not None:
SCREAMING_SNAKE_CASE_ = pipeline.safety_checker
SCREAMING_SNAKE_CASE_ = safety_checker.config.vision_config.num_channels
SCREAMING_SNAKE_CASE_ = safety_checker.config.vision_config.image_size
SCREAMING_SNAKE_CASE_ = safety_checker.forward_onnx
onnx_export(
pipeline.safety_checker , model_args=(
torch.randn(
1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ),
torch.randn(1 , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).to(device=lowerCAmelCase , dtype=lowerCAmelCase ),
) , output_path=output_path / 'safety_checker' / 'model.onnx' , ordered_input_names=['clip_input', 'images'] , output_names=['out_images', 'has_nsfw_concepts'] , dynamic_axes={
'clip_input': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
'images': {0: 'batch', 1: 'height', 2: 'width', 3: 'channels'},
} , opset=lowerCAmelCase , )
del pipeline.safety_checker
SCREAMING_SNAKE_CASE_ = OnnxRuntimeModel.from_pretrained(output_path / 'safety_checker' )
SCREAMING_SNAKE_CASE_ = pipeline.feature_extractor
else:
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline(
vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_encoder' ) , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / 'vae_decoder' ) , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / 'text_encoder' ) , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / 'unet' ) , scheduler=pipeline.scheduler , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , requires_safety_checker=safety_checker is not None , )
onnx_pipeline.save_pretrained(lowerCAmelCase )
print('ONNX pipeline saved to' , lowerCAmelCase )
del pipeline
del onnx_pipeline
SCREAMING_SNAKE_CASE_ = OnnxStableDiffusionPipeline.from_pretrained(lowerCAmelCase , provider='CPUExecutionProvider' )
print('ONNX pipeline is loadable' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE: List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=1_4,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
SCREAMING_SNAKE_CASE: Tuple = parser.parse_args()
convert_models(args.model_path, args.output_path, args.opset, args.fpaa) | 360 |
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
SCREAMING_SNAKE_CASE: int = ['''bart.large''', '''bart.large.mnli''', '''bart.large.cnn''', '''bart_xsum/model.pt''']
SCREAMING_SNAKE_CASE: Tuple = {'''bart.large''': BartModel, '''bart.large.mnli''': BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse('''0.9.0'''):
raise Exception('''requires fairseq >= 0.9.0''')
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE: int = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE: Any = ''' Hello world! cécé herlolip'''
SCREAMING_SNAKE_CASE: Dict = [
('''model.classification_heads.mnli.dense.weight''', '''classification_head.dense.weight'''),
('''model.classification_heads.mnli.dense.bias''', '''classification_head.dense.bias'''),
('''model.classification_heads.mnli.out_proj.weight''', '''classification_head.out_proj.weight'''),
('''model.classification_heads.mnli.out_proj.bias''', '''classification_head.out_proj.bias'''),
]
def _a ( lowerCAmelCase )-> Any:
SCREAMING_SNAKE_CASE_ = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
]
for k in ignore_keys:
state_dict.pop(lowerCAmelCase , lowerCAmelCase )
def _a ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )-> str:
SCREAMING_SNAKE_CASE_ = dct.pop(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = val
def _a ( lowerCAmelCase )-> List[str]:
SCREAMING_SNAKE_CASE_ = torch.load(lowerCAmelCase , map_location='cpu' )
SCREAMING_SNAKE_CASE_ = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval()
hub_interface.model.load_state_dict(sd['model'] )
return hub_interface
def _a ( lowerCAmelCase )-> Optional[Any]:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = emb.weight.shape
SCREAMING_SNAKE_CASE_ = nn.Linear(lowerCAmelCase , lowerCAmelCase , bias=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = emb.weight.data
return lin_layer
@torch.no_grad()
def _a ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None )-> Tuple:
if not os.path.exists(lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = torch.hub.load('pytorch/fairseq' , lowerCAmelCase ).eval()
else:
SCREAMING_SNAKE_CASE_ = load_xsum_checkpoint(lowerCAmelCase )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
SCREAMING_SNAKE_CASE_ = checkpoint_path.replace('.' , '-' )
SCREAMING_SNAKE_CASE_ = BartConfig.from_pretrained(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = bart.encode(lowerCAmelCase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = BartTokenizer.from_pretrained(lowerCAmelCase ).encode(lowerCAmelCase , return_tensors='pt' ).unsqueeze(0 )
if not torch.eq(lowerCAmelCase , lowerCAmelCase ).all():
raise ValueError(
F'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
if checkpoint_path == "bart.large.mnli":
SCREAMING_SNAKE_CASE_ = bart.state_dict()
remove_ignore_keys_(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = state_dict['model.decoder.embed_tokens.weight']
for src, dest in mnli_rename_keys:
rename_key(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = BartForSequenceClassification(lowerCAmelCase ).eval()
model.load_state_dict(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = bart.predict('mnli' , lowerCAmelCase , return_logits=lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase )[0] # logits
else: # no classification heads to worry about
SCREAMING_SNAKE_CASE_ = bart.model.state_dict()
remove_ignore_keys_(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = state_dict['decoder.embed_tokens.weight']
SCREAMING_SNAKE_CASE_ = bart.extract_features(lowerCAmelCase )
if hf_checkpoint_name == "facebook/bart-large":
SCREAMING_SNAKE_CASE_ = BartModel(lowerCAmelCase ).eval()
model.load_state_dict(lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = model(lowerCAmelCase ).model[0]
else:
SCREAMING_SNAKE_CASE_ = BartForConditionalGeneration(lowerCAmelCase ).eval() # an existing summarization ckpt
model.model.load_state_dict(lowerCAmelCase )
if hasattr(lowerCAmelCase , 'lm_head' ):
SCREAMING_SNAKE_CASE_ = make_linear_from_emb(model.model.shared )
SCREAMING_SNAKE_CASE_ = model.model(lowerCAmelCase )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
F'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
SCREAMING_SNAKE_CASE: List[str] = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config) | 360 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = '''imagegpt'''
SCREAMING_SNAKE_CASE__ = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , _lowerCAmelCase=512 + 1 , _lowerCAmelCase=32 * 32 , _lowerCAmelCase=512 , _lowerCAmelCase=24 , _lowerCAmelCase=8 , _lowerCAmelCase=None , _lowerCAmelCase="quick_gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=1E-5 , _lowerCAmelCase=0.02 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , **_lowerCAmelCase , ):
_lowerCAmelCase = vocab_size
_lowerCAmelCase = n_positions
_lowerCAmelCase = n_embd
_lowerCAmelCase = n_layer
_lowerCAmelCase = n_head
_lowerCAmelCase = n_inner
_lowerCAmelCase = activation_function
_lowerCAmelCase = resid_pdrop
_lowerCAmelCase = embd_pdrop
_lowerCAmelCase = attn_pdrop
_lowerCAmelCase = layer_norm_epsilon
_lowerCAmelCase = initializer_range
_lowerCAmelCase = scale_attn_weights
_lowerCAmelCase = use_cache
_lowerCAmelCase = scale_attn_by_inverse_layer_idx
_lowerCAmelCase = reorder_and_upcast_attn
_lowerCAmelCase = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCAmelCase , **_lowerCAmelCase )
class UpperCAmelCase ( snake_case_ ):
@property
def __lowerCAmelCase ( self ):
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = 1 , _lowerCAmelCase = -1 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = 3 , _lowerCAmelCase = 32 , _lowerCAmelCase = 32 , ):
_lowerCAmelCase = self._generate_dummy_images(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
_lowerCAmelCase = dict(preprocessor(images=_lowerCAmelCase , return_tensors=_lowerCAmelCase ) )
return inputs | 664 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCAmelCase_ = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def UpperCAmelCase__ ( )->Any:
_lowerCAmelCase = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
_lowerCAmelCase = get_sagemaker_input()
else:
_lowerCAmelCase = get_cluster_input()
return config
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : int=None )->str:
if subparsers is not None:
_lowerCAmelCase = subparsers.add_parser('''config''' , description=_SCREAMING_SNAKE_CASE )
else:
_lowerCAmelCase = argparse.ArgumentParser('''Accelerate config command''' , description=_SCREAMING_SNAKE_CASE )
parser.add_argument(
'''--config_file''' , default=_SCREAMING_SNAKE_CASE , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=_SCREAMING_SNAKE_CASE )
return parser
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict )->str:
_lowerCAmelCase = get_user_input()
if args.config_file is not None:
_lowerCAmelCase = args.config_file
else:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.makedirs(_SCREAMING_SNAKE_CASE )
_lowerCAmelCase = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(_SCREAMING_SNAKE_CASE )
else:
config.to_yaml_file(_SCREAMING_SNAKE_CASE )
print(f'''accelerate configuration saved at {config_file}''' )
def UpperCAmelCase__ ( )->List[Any]:
_lowerCAmelCase = config_command_parser()
_lowerCAmelCase = parser.parse_args()
config_command(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main() | 664 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
A = None
A = logging.get_logger(__name__)
A = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
A = {
'''google/rembert''': 256,
}
A = '''▁'''
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = RemBertTokenizer
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="[SEP]" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="[CLS]" , _UpperCAmelCase="[MASK]" , **_UpperCAmelCase , ):
# Mask token behave like a normal word, i.e. include the space before it
__a : List[str] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , remove_space=_UpperCAmelCase , keep_accents=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , **_UpperCAmelCase , )
__a : Tuple = do_lower_case
__a : Tuple = remove_space
__a : Tuple = keep_accents
__a : str = vocab_file
__a : Optional[Any] = False if not self.vocab_file else True
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Optional[Any] = [self.sep_token_id]
__a : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : List[str] = [self.sep_token_id]
__a : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(_UpperCAmelCase ) )
return
__a : int = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
return (out_vocab_file,) | 52 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = '''llama'''
__lowerCAmelCase = ['''past_key_values''']
def __init__( self , _UpperCAmelCase=32000 , _UpperCAmelCase=4096 , _UpperCAmelCase=11008 , _UpperCAmelCase=32 , _UpperCAmelCase=32 , _UpperCAmelCase=None , _UpperCAmelCase="silu" , _UpperCAmelCase=2048 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=False , _UpperCAmelCase=None , **_UpperCAmelCase , ):
__a : Dict = vocab_size
__a : Union[str, Any] = max_position_embeddings
__a : str = hidden_size
__a : List[str] = intermediate_size
__a : Any = num_hidden_layers
__a : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = num_key_value_heads
__a : Dict = hidden_act
__a : Union[str, Any] = initializer_range
__a : int = rms_norm_eps
__a : Optional[int] = pretraining_tp
__a : Optional[Any] = use_cache
__a : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase , )
def _lowerCamelCase ( self ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _UpperCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
f"""got {self.rope_scaling}""" )
__a : Tuple = self.rope_scaling.get('''type''' , _UpperCAmelCase )
__a : Optional[int] = self.rope_scaling.get('''factor''' , _UpperCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
f"""`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" ) | 52 | 1 |
import math
def A__ ( SCREAMING_SNAKE_CASE_ ) -> int:
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Tuple =F"Input value of [number={number}] must be an integer"
raise TypeError(SCREAMING_SNAKE_CASE_ )
if number < 1:
lowerCamelCase : Optional[Any] =F"Input value of [number={number}] must be > 0"
raise ValueError(SCREAMING_SNAKE_CASE_ )
elif number == 1:
return 3
elif number == 2:
return 5
else:
lowerCamelCase : str =int(math.log(number // 3 , 2 ) ) + 2
lowerCamelCase : Tuple =[3, 5]
lowerCamelCase : List[Any] =2
lowerCamelCase : Optional[Any] =3
for block in range(1 , SCREAMING_SNAKE_CASE_ ):
for _ in range(SCREAMING_SNAKE_CASE_ ):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
snake_case_ = 0
try:
snake_case_ = proth(number)
except ValueError:
print(F"""ValueError: there is no {number}th Proth number""")
continue
print(F"""The {number}th Proth number: {value}""")
| 262 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class snake_case_ ( _A , _A):
@register_to_config
def __init__( self , __lowercase = 7_6_8 , ) -> List[Any]:
super().__init__()
lowerCamelCase : str =nn.Parameter(torch.zeros(1 , __lowercase ) )
lowerCamelCase : Union[str, Any] =nn.Parameter(torch.ones(1 , __lowercase ) )
def __lowercase ( self , __lowercase = None , __lowercase = None , ) -> List[str]:
lowerCamelCase : Tuple =nn.Parameter(self.mean.to(__lowercase ).to(__lowercase ) )
lowerCamelCase : Dict =nn.Parameter(self.std.to(__lowercase ).to(__lowercase ) )
return self
def __lowercase ( self , __lowercase ) -> Optional[Any]:
lowerCamelCase : Any =(embeds - self.mean) * 1.0 / self.std
return embeds
def __lowercase ( self , __lowercase ) -> Optional[int]:
lowerCamelCase : Any =(embeds * self.std) + self.mean
return embeds
| 262 | 1 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
_lowerCAmelCase :List[Any] = """3"""
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 251 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
"transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = '''transfo-xl'''
lowerCAmelCase = ['''mems''']
lowerCAmelCase = {
'''n_token''': '''vocab_size''',
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self , a=26_77_35 , a=[2_00_00, 4_00_00, 20_00_00] , a=10_24 , a=10_24 , a=16 , a=64 , a=40_96 , a=4 , a=False , a=18 , a=16_00 , a=10_00 , a=True , a=True , a=0 , a=-1 , a=True , a=0.1 , a=0.0 , a=True , a="normal" , a=0.01 , a=0.01 , a=0.02 , a=1E-5 , a=0 , **a , ) -> Union[str, Any]:
snake_case_ = vocab_size
snake_case_ = []
self.cutoffs.extend(a )
if proj_share_all_but_first:
snake_case_ = [False] + [True] * len(self.cutoffs )
else:
snake_case_ = [False] + [False] * len(self.cutoffs )
snake_case_ = d_model
snake_case_ = d_embed
snake_case_ = d_head
snake_case_ = d_inner
snake_case_ = div_val
snake_case_ = pre_lnorm
snake_case_ = n_layer
snake_case_ = n_head
snake_case_ = mem_len
snake_case_ = same_length
snake_case_ = attn_type
snake_case_ = clamp_len
snake_case_ = sample_softmax
snake_case_ = adaptive
snake_case_ = dropout
snake_case_ = dropatt
snake_case_ = untie_r
snake_case_ = init
snake_case_ = init_range
snake_case_ = proj_init_std
snake_case_ = init_std
snake_case_ = layer_norm_epsilon
super().__init__(eos_token_id=a , **a )
@property
def _UpperCamelCase ( self ) -> int:
# Message copied from Transformer-XL documentation
logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def _UpperCamelCase ( self , a ) -> List[str]:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 198 | 0 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : Dict ,_a : Any=2 ,_a : Dict=8 ,_a : str=True ,_a : List[Any]=True ,_a : int=True ,_a : Union[str, Any]=True ,_a : List[str]=99 ,_a : Any=16 ,_a : Optional[Any]=5 ,_a : Any=2 ,_a : List[Any]=36 ,_a : Any="gelu" ,_a : str=0.0 ,_a : List[str]=0.0 ,_a : Union[str, Any]=512 ,_a : Dict=16 ,_a : str=2 ,_a : Any=0.02 ,_a : Union[str, Any]=3 ,_a : List[str]=4 ,_a : Optional[int]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Optional[Any] = seq_length
A_ : List[str] = is_training
A_ : Optional[int] = use_input_mask
A_ : List[str] = use_token_type_ids
A_ : Optional[Any] = use_labels
A_ : int = vocab_size
A_ : List[str] = hidden_size
A_ : Optional[int] = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : int = hidden_act
A_ : str = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Optional[int] = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Union[str, Any] = type_sequence_label_size
A_ : Optional[Any] = initializer_range
A_ : Optional[int] = num_labels
A_ : Dict = num_choices
A_ : str = scope
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Optional[Any] = None
if self.use_token_type_ids:
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : Dict = None
A_ : List[str] = None
A_ : Dict = None
if self.use_labels:
A_ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : Optional[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : Optional[Any] ):
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.get_config()
A_ : Dict = 300
return config
def _a ( self : Union[str, Any] ):
'''simple docstring'''
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) : Union[str, Any] = self.prepare_config_and_inputs()
A_ : Optional[int] = True
A_ : Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _a ( self : Tuple ,_a : str ,_a : Optional[int] ,_a : List[Any] ,_a : Optional[int] ,_a : str ,_a : str ,_a : List[Any] ):
'''simple docstring'''
A_ : List[Any] = MraModel(config=_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,token_type_ids=_a )
A_ : List[str] = model(_a ,token_type_ids=_a )
A_ : str = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : int ,_a : List[Any] ,_a : Optional[Any] ,_a : List[str] ,_a : Optional[Any] ,_a : List[str] ,_a : int ,_a : Union[str, Any] ,_a : str ,_a : List[Any] ,):
'''simple docstring'''
A_ : Optional[int] = True
A_ : Any = MraModel(_a )
model.to(_a )
model.eval()
A_ : List[Any] = model(
_a ,attention_mask=_a ,token_type_ids=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,)
A_ : str = model(
_a ,attention_mask=_a ,token_type_ids=_a ,encoder_hidden_states=_a ,)
A_ : Union[str, Any] = model(_a ,attention_mask=_a ,token_type_ids=_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Optional[Any] ,_a : int ,_a : Optional[Any] ,_a : int ,_a : Tuple ,_a : Tuple ,_a : Dict ,_a : str ):
'''simple docstring'''
A_ : Optional[Any] = MraForMaskedLM(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Optional[Any] ,_a : str ,_a : Optional[Any] ,_a : Tuple ,_a : Union[str, Any] ,_a : int ,_a : List[Any] ,_a : str ):
'''simple docstring'''
A_ : str = MraForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
A_ : Any = model(
_a ,attention_mask=_a ,token_type_ids=_a ,start_positions=_a ,end_positions=_a ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _a ( self : str ,_a : int ,_a : str ,_a : Optional[int] ,_a : Any ,_a : Optional[Any] ,_a : str ,_a : int ):
'''simple docstring'''
A_ : Optional[Any] = self.num_labels
A_ : Dict = MraForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[str] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Any ,_a : Optional[Any] ,_a : Optional[int] ,_a : str ,_a : Optional[int] ,_a : Optional[Any] ,_a : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Any = self.num_labels
A_ : Union[str, Any] = MraForTokenClassification(config=_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Union[str, Any] ,_a : str ,_a : List[str] ,_a : Tuple ,_a : Optional[Any] ,_a : str ,_a : Tuple ,_a : Tuple ):
'''simple docstring'''
A_ : Any = self.num_choices
A_ : int = MraForMultipleChoice(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Any = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Optional[int] = model(
_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _a ( self : str ):
'''simple docstring'''
A_ : Any = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) , (
A_
) ,
) : Optional[Any] = config_and_inputs
A_ : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = ()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : List[str] = MraModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : List[Any] = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Dict = MraModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason="""MRA does not output attentions""" )
def _a ( self : Tuple ):
'''simple docstring'''
return
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : str ):
'''simple docstring'''
A_ : Any = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
A_ : Dict = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
A_ : Any = model(_a )[0]
A_ : str = torch.Size((1, 256, 768) )
self.assertEqual(output.shape ,_a )
A_ : List[Any] = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
@slow
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[int] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
A_ : Union[str, Any] = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
A_ : Tuple = model(_a )[0]
A_ : str = 50265
A_ : Dict = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape ,_a )
A_ : Tuple = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
A_ : Optional[int] = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
A_ : List[Any] = model(_a )[0]
A_ : List[str] = 50265
A_ : Union[str, Any] = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape ,_a )
A_ : Optional[int] = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
| 27 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
def white_space_fix(lowerCamelCase : List[Any]):
return " ".join(text.split())
def remove_punc(lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__a = {"tokenization_bertweet": ["BertweetTokenizer"]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 374 |
'''simple docstring'''
__a = "Alexander Joslin"
import operator as op
from .stack import Stack
def __snake_case( _lowerCAmelCase ) -> int:
snake_case__ : str = {"""*""": op.mul, """/""": op.truediv, """+""": op.add, """-""": op.sub}
snake_case__ : Stack[int] = Stack()
snake_case__ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_lowerCAmelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(_lowerCAmelCase )
elif i == ")":
# RULE 4
snake_case__ : Union[str, Any] = operator_stack.peek()
operator_stack.pop()
snake_case__ : Any = operand_stack.peek()
operand_stack.pop()
snake_case__ : str = operand_stack.peek()
operand_stack.pop()
snake_case__ : Optional[Any] = operators[opr](_lowerCAmelCase , _lowerCAmelCase )
operand_stack.push(_lowerCAmelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
__a = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 374 | 1 |
def _a ( UpperCAmelCase , UpperCAmelCase ) -> list[str]:
"""simple docstring"""
return [sentence[i : i + ngram_size] for i in range(len(UpperCAmelCase ) - ngram_size + 1 )]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 130 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[Any] = logging.get_logger(__name__)
def _a ( UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
lowerCamelCase__ : Union[str, Any] = MaskFormerConfig(backbone_config=UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
lowerCamelCase__ : int = 847
lowerCamelCase__ : Dict = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
lowerCamelCase__ : Dict = 150
lowerCamelCase__ : Optional[int] = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
lowerCamelCase__ : List[str] = 171
lowerCamelCase__ : Dict = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
lowerCamelCase__ : Dict = 133
lowerCamelCase__ : Tuple = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
lowerCamelCase__ : int = 19
lowerCamelCase__ : Dict = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
lowerCamelCase__ : List[Any] = 65
lowerCamelCase__ : Optional[int] = '''mapillary-vistas-id2label.json'''
lowerCamelCase__ : Optional[Any] = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase__ : List[Any] = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
return config
def _a ( UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.attn.proj.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.norm2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.layers.{i}.downsample.reduction.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.weight", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.layers.{i}.downsample.norm.bias", f"model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append((f"backbone.norm{i}.weight", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.weight") )
rename_keys.append((f"backbone.norm{i}.bias", f"model.pixel_level_module.encoder.hidden_states_norms.{i}.bias") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"sem_seg_head.adapter_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight") )
rename_keys.append((f"sem_seg_head.adapter_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.weight", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight") )
rename_keys.append((f"sem_seg_head.layer_{source_index}.norm.bias", f"model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias") )
# cross-attention out projection
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias") )
# MLP 1
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight", f"model.transformer_module.decoder.layers.{idx}.fc1.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias", f"model.transformer_module.decoder.layers.{idx}.fc1.bias") )
# MLP 2
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight", f"model.transformer_module.decoder.layers.{idx}.fc2.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias", f"model.transformer_module.decoder.layers.{idx}.fc2.bias") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias", f"model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias", f"model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias") )
# layernorm 3 (final layernorm)
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight") )
rename_keys.append((f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias", f"model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.weight", f"mask_embedder.{i}.0.weight") )
rename_keys.append((f"sem_seg_head.predictor.mask_embed.layers.{i}.bias", f"mask_embedder.{i}.0.bias") )
# fmt: on
return rename_keys
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : str = dct.pop(UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = val
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Any:
"""simple docstring"""
lowerCamelCase__ : str = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
lowerCamelCase__ : Optional[int] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
lowerCamelCase__ : Any = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight" )
lowerCamelCase__ : Optional[Any] = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : int = in_proj_weight[:dim, :]
lowerCamelCase__ : Optional[Any] = in_proj_bias[: dim]
lowerCamelCase__ : Optional[Any] = in_proj_weight[
dim : dim * 2, :
]
lowerCamelCase__ : str = in_proj_bias[
dim : dim * 2
]
lowerCamelCase__ : Optional[int] = in_proj_weight[
-dim :, :
]
lowerCamelCase__ : Optional[int] = in_proj_bias[-dim :]
# fmt: on
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
# fmt: off
lowerCamelCase__ : Union[str, Any] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase__ : List[str] = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight" )
lowerCamelCase__ : str = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : Union[str, Any] = in_proj_weight[: hidden_size, :]
lowerCamelCase__ : str = in_proj_bias[:config.hidden_size]
lowerCamelCase__ : Any = in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase__ : Tuple = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase__ : Dict = in_proj_weight[-hidden_size :, :]
lowerCamelCase__ : str = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
lowerCamelCase__ : int = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight" )
lowerCamelCase__ : Optional[int] = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ : int = in_proj_weight[: hidden_size, :]
lowerCamelCase__ : str = in_proj_bias[:config.hidden_size]
lowerCamelCase__ : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
lowerCamelCase__ : int = in_proj_bias[hidden_size : hidden_size * 2]
lowerCamelCase__ : Any = in_proj_weight[-hidden_size :, :]
lowerCamelCase__ : Optional[Any] = in_proj_bias[-hidden_size :]
# fmt: on
def _a ( ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase__ : Optional[int] = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw )
return im
@torch.no_grad()
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : List[str] = get_maskformer_config(UpperCAmelCase )
# load original state_dict
with open(UpperCAmelCase , '''rb''' ) as f:
lowerCamelCase__ : Tuple = pickle.load(UpperCAmelCase )
lowerCamelCase__ : Tuple = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
lowerCamelCase__ : Tuple = create_rename_keys(UpperCAmelCase )
for src, dest in rename_keys:
rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
read_in_swin_q_k_v(UpperCAmelCase , config.backbone_config )
read_in_decoder_q_k_v(UpperCAmelCase , UpperCAmelCase )
# update to torch tensors
for key, value in state_dict.items():
lowerCamelCase__ : Tuple = torch.from_numpy(UpperCAmelCase )
# load 🤗 model
lowerCamelCase__ : Any = MaskFormerForInstanceSegmentation(UpperCAmelCase )
model.eval()
for name, param in model.named_parameters():
print(UpperCAmelCase , param.shape )
lowerCamelCase__ , lowerCamelCase__ : Any = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(UpperCAmelCase ) == 0, f"Unexpected keys: {unexpected_keys}"
# verify results
lowerCamelCase__ : List[str] = prepare_img()
if "vistas" in model_name:
lowerCamelCase__ : Any = 65
elif "cityscapes" in model_name:
lowerCamelCase__ : Optional[Any] = 65535
else:
lowerCamelCase__ : List[Any] = 255
lowerCamelCase__ : int = True if '''ade''' in model_name else False
lowerCamelCase__ : str = MaskFormerImageProcessor(ignore_index=UpperCAmelCase , reduce_labels=UpperCAmelCase )
lowerCamelCase__ : List[str] = image_processor(UpperCAmelCase , return_tensors='''pt''' )
lowerCamelCase__ : Dict = model(**UpperCAmelCase )
print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
lowerCamelCase__ : Any = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and image processor to {pytorch_dump_folder_path}" )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
image_processor.save_pretrained(UpperCAmelCase )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(f"nielsr/{model_name}" )
image_processor.push_to_hub(f"nielsr/{model_name}" )
if __name__ == "__main__":
_A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='maskformer-swin-tiny-ade',
type=str,
help=('Name of the MaskFormer model you\'d like to convert',),
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
_A : Union[str, Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 130 | 1 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar('''T''')
lowerCamelCase__ = TypeVar('''U''')
class __magic_name__ (Generic[T, U] ):
def __init__( self , _a , _a ) -> Dict:
lowerCAmelCase_ = key
lowerCAmelCase_ = val
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def __repr__( self ) -> str:
return (
f"Node: key: {self.key}, val: {self.val}, "
f"has next: {bool(self.next )}, has prev: {bool(self.prev )}"
)
class __magic_name__ (Generic[T, U] ):
def __init__( self ) -> None:
lowerCAmelCase_ = DoubleLinkedListNode(_a , _a )
lowerCAmelCase_ = DoubleLinkedListNode(_a , _a )
lowerCAmelCase_ , lowerCAmelCase_ = self.rear, self.head
def __repr__( self ) -> str:
lowerCAmelCase_ = ["DoubleLinkedList"]
lowerCAmelCase_ = self.head
while node.next is not None:
rep.append(str(_a ) )
lowerCAmelCase_ = node.next
rep.append(str(self.rear ) )
return ",\n ".join(_a )
def __a ( self , _a ) -> None:
lowerCAmelCase_ = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
lowerCAmelCase_ = node
lowerCAmelCase_ = previous
lowerCAmelCase_ = node
lowerCAmelCase_ = self.rear
def __a ( self , _a ) -> DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
lowerCAmelCase_ = node.next
lowerCAmelCase_ = node.prev
lowerCAmelCase_ = None
lowerCAmelCase_ = None
return node
class __magic_name__ (Generic[T, U] ):
lowerCamelCase__ = {}
def __init__( self , _a ) -> Tuple:
lowerCAmelCase_ = DoubleLinkedList()
lowerCAmelCase_ = capacity
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0
lowerCAmelCase_ = 0
lowerCAmelCase_ = {}
def __repr__( self ) -> str:
return (
f"CacheInfo(hits={self.hits}, misses={self.miss}, "
f"capacity={self.capacity}, current size={self.num_keys})"
)
def __contains__( self , _a ) -> bool:
return key in self.cache
def __a ( self , _a ) -> U | None:
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
lowerCAmelCase_ = self.cache[key]
lowerCAmelCase_ = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(_a )
return node.val
self.miss += 1
return None
def __a ( self , _a , _a ) -> None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
lowerCAmelCase_ = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(_a ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
lowerCAmelCase_ = DoubleLinkedListNode(_a , _a )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
lowerCAmelCase_ = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
lowerCAmelCase_ = value
self.list.add(_a )
@classmethod
def __a ( cls , _a = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(_a ) -> Callable[..., U]:
def cache_decorator_wrapper(*_a ) -> U:
if func not in cls.decorator_function_to_instance_map:
lowerCAmelCase_ = LRUCache(_a )
lowerCAmelCase_ = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
lowerCAmelCase_ = func(*_a )
cls.decorator_function_to_instance_map[func].put(args[0] , _a )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(_a , "cache_info" , _a ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 122 |
from collections import defaultdict
class __magic_name__ :
def __init__( self , _a , _a ) -> Tuple:
lowerCAmelCase_ = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
lowerCAmelCase_ = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(_a ) )
]
lowerCAmelCase_ = defaultdict(_a ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
lowerCAmelCase_ = (1 << len(_a )) - 1
def __a ( self , _a , _a ) -> Optional[Any]:
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
lowerCAmelCase_ = self.count_ways_until(_a , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
lowerCAmelCase_ = total_ways_util
return self.dp[mask][task_no]
def __a ( self , _a ) -> Optional[int]:
# Store the list of persons for each task
for i in range(len(_a ) ):
for j in task_performed[i]:
self.task[j].append(_a )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
lowerCamelCase__ = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
lowerCamelCase__ = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
| 122 | 1 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __snake_case :
'''simple docstring'''
def __init__( self : int , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any]=13 , _UpperCamelCase : List[str]=64 , _UpperCamelCase : Any=2 , _UpperCamelCase : Tuple=3 , _UpperCamelCase : List[Any]=True , _UpperCamelCase : str=True , _UpperCamelCase : List[str]=32 , _UpperCamelCase : str=5 , _UpperCamelCase : Tuple=4 , _UpperCamelCase : Tuple=37 , _UpperCamelCase : List[str]="gelu" , _UpperCamelCase : List[str]=0.1 , _UpperCamelCase : Optional[Any]=0.1 , _UpperCamelCase : str=10 , _UpperCamelCase : str=0.0_2 , _UpperCamelCase : Union[str, Any]=[1, 16, 4, 4] , _UpperCamelCase : Dict=None , ) ->int:
"""simple docstring"""
_lowerCamelCase : int = parent
_lowerCamelCase : str = batch_size
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : List[Any] = patch_size
_lowerCamelCase : Optional[Any] = num_channels
_lowerCamelCase : List[str] = is_training
_lowerCamelCase : List[Any] = use_labels
_lowerCamelCase : Optional[Any] = hidden_size
_lowerCamelCase : Any = num_hidden_layers
_lowerCamelCase : List[Any] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : List[str] = hidden_dropout_prob
_lowerCamelCase : str = attention_probs_dropout_prob
_lowerCamelCase : int = type_sequence_label_size
_lowerCamelCase : Dict = initializer_range
_lowerCamelCase : Tuple = scope
_lowerCamelCase : Dict = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_lowerCamelCase : Optional[int] = (self.image_size // 32) ** 2
_lowerCamelCase : List[str] = num_patches + 1
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_lowerCamelCase : str = None
if self.use_labels:
_lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
_lowerCamelCase : Dict = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A__ , )
def _SCREAMING_SNAKE_CASE ( self : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : List[Any]) ->Any:
"""simple docstring"""
_lowerCamelCase : Optional[int] = ViTHybridModel(config=A__)
model.to(A__)
model.eval()
_lowerCamelCase : Tuple = model(A__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : List[Any]) ->Dict:
"""simple docstring"""
_lowerCamelCase : Dict = self.type_sequence_label_size
_lowerCamelCase : Dict = ViTHybridForImageClassification(A__)
model.to(A__)
model.eval()
_lowerCamelCase : Any = model(A__ , labels=A__)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : List[str] = self.prepare_config_and_inputs()
_lowerCamelCase : Union[str, Any] = config_and_inputs
_lowerCamelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_snake_case = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
_snake_case = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
_snake_case = False
_snake_case = False
_snake_case = False
def _SCREAMING_SNAKE_CASE ( self : Dict) ->List[str]:
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = ViTHybridModelTester(self)
_lowerCamelCase : List[Any] = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : str) ->List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""")
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->str:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self : List[str]) ->Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : int = model_class(A__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
_lowerCamelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Tuple:
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(A__)
_lowerCamelCase : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : Optional[int] = [*signature.parameters.keys()]
_lowerCamelCase : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__)
def _SCREAMING_SNAKE_CASE ( self : str) ->str:
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__)
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->Optional[int]:
"""simple docstring"""
_lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__)
def _SCREAMING_SNAKE_CASE ( self : Dict) ->Dict:
"""simple docstring"""
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase : Any = _config_zero_init(A__)
for model_class in self.all_model_classes:
_lowerCamelCase : List[str] = model_class(config=A__)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_lowerCamelCase : Tuple = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any) ->int:
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase : Union[str, Any] = ViTHybridModel.from_pretrained(A__)
self.assertIsNotNone(A__)
def A__ ( ):
'''simple docstring'''
_lowerCamelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict:
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->List[str]:
"""simple docstring"""
_lowerCamelCase : str = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
A__)
_lowerCamelCase : Optional[Any] = self.default_image_processor
_lowerCamelCase : Tuple = prepare_img()
_lowerCamelCase : Optional[int] = image_processor(images=A__ , return_tensors="""pt""").to(A__)
# forward pass
with torch.no_grad():
_lowerCamelCase : List[Any] = model(**A__)
# verify the logits
_lowerCamelCase : Tuple = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , A__)
_lowerCamelCase : List[Any] = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9]).to(A__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1E-4))
@slow
@require_accelerate
def _SCREAMING_SNAKE_CASE ( self : List[Any]) ->List[Any]:
"""simple docstring"""
_lowerCamelCase : str = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""")
_lowerCamelCase : Tuple = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""")
_lowerCamelCase : Optional[Any] = prepare_img()
_lowerCamelCase : Union[str, Any] = image_processor(images=A__ , return_tensors="""pt""")
_lowerCamelCase : Union[str, Any] = model(**A__)
_lowerCamelCase : Tuple = outputs.logits
# model predicts one of the 1000 ImageNet classes
_lowerCamelCase : Union[str, Any] = logits.argmax(-1).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""")
| 719 | import math
def A__ ( __A ):
'''simple docstring'''
assert isinstance(__A , __A ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
_lowerCamelCase : List[Any] = range(3 , int(math.sqrt(__A ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def A__ ( __A , __A=1 , **__A ):
'''simple docstring'''
_lowerCamelCase : Dict = factor * value
_lowerCamelCase : str = value
while not is_prime(__A ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **__A )
return value
| 15 | 0 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Dict = {'vocab_file': 'spiece.model'}
_a : Union[str, Any] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
_a : Union[str, Any] = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
class lowercase_ ( a ):
'''simple docstring'''
__lowerCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : List[str] = ["input_ids", "attention_mask"]
__lowerCAmelCase : List[int] = []
def __init__( self , a_ , a_="<unk>" , a_="<s>" , a_="</s>" , a_="<pad>" , a_="[SEP]" , a_="[MASK]" , a_="[CLS]" , a_ = None , **a_ , ) -> None:
"""simple docstring"""
UpperCAmelCase = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else bos_token
UpperCAmelCase = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else eos_token
UpperCAmelCase = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else unk_token
UpperCAmelCase = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else pad_token
UpperCAmelCase = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else cls_token
UpperCAmelCase = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_ , eos_token=a_ , unk_token=a_ , pad_token=a_ , sep_token=a_ , mask_token=a_ , cls_token=a_ , sp_model_kwargs=self.sp_model_kwargs , **a_ , )
UpperCAmelCase = vocab_file
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
return self.sp_model.get_piece_size()
def snake_case_ ( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
return state
def __setstate__( self , a_ ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self , a_ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(a_ , out_type=a_ )
def snake_case_ ( self , a_ ) -> Dict:
"""simple docstring"""
return self.sp_model.piece_to_id(a_ )
def snake_case_ ( self , a_ ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.sp_model.IdToPiece(a_ )
return token
def snake_case_ ( self , a_ ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = ''
UpperCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a_ ) + token
UpperCAmelCase = True
UpperCAmelCase = []
else:
current_sub_tokens.append(a_ )
UpperCAmelCase = False
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def snake_case_ ( self , a_ , a_ = False , a_ = None , a_ = True , **a_ , ) -> str:
"""simple docstring"""
UpperCAmelCase = kwargs.pop('use_source_tokenizer' , a_ )
UpperCAmelCase = self.convert_ids_to_tokens(a_ , skip_special_tokens=a_ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
UpperCAmelCase = []
UpperCAmelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
UpperCAmelCase = []
sub_texts.append(a_ )
else:
current_sub_text.append(a_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(a_ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
UpperCAmelCase = re.sub(r' (\[(MASK|SEP)\])' , r'\1' , ' '.join(a_ ) )
else:
UpperCAmelCase = ''.join(a_ )
UpperCAmelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
UpperCAmelCase = self.clean_up_tokenization(a_ )
return clean_text
else:
return text
def snake_case_ ( self , a_ , a_ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(a_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCAmelCase = os.path.join(
a_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_ , 'wb' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
def snake_case_ ( self , a_ , a_ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case_ ( self , a_ , a_ = None , a_ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
if token_ids_a is None:
return [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1]
def snake_case_ ( self , a_ , a_ = None ) -> List[int]:
"""simple docstring"""
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 447 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowercase_ ( a ):
'''simple docstring'''
@require_torch
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCAmelCase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
UpperCAmelCase = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
UpperCAmelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
UpperCAmelCase = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(a_ )
BertModel.from_pretrained(a_ )
BertTokenizer.from_pretrained(a_ )
pipeline(task='fill-mask' , model=a_ )
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
UpperCAmelCase = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase = '1'
UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCAmelCase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
UpperCAmelCase = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
UpperCAmelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
UpperCAmelCase = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(a_ )
BertModel.from_pretrained(a_ )
BertTokenizer.from_pretrained(a_ )
pipeline(task='fill-mask' , model=a_ )
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
UpperCAmelCase = self.get_env()
UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCAmelCase = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
UpperCAmelCase = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
UpperCAmelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
UpperCAmelCase = self.get_env()
UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# next emulate no network
UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase = '1'
UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = '\nfrom transformers import pipeline\n '
UpperCAmelCase = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
UpperCAmelCase = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
UpperCAmelCase = self.get_env()
UpperCAmelCase = '1'
UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, mock, run] )]
UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = '\nfrom transformers import AutoModel\n '
UpperCAmelCase = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
UpperCAmelCase = self.get_env()
UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase = '1'
UpperCAmelCase = subprocess.run(a_ , env=a_ , check=a_ , capture_output=a_ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
| 447 | 1 |
import os
from pathlib import Path
def A__ ( ) ->Any:
from torch.utils.cpp_extension import load
__A =Path(__A ).resolve().parent.parent.parent / '''kernels''' / '''deformable_detr'''
__A =[
root / filename
for filename in [
'''vision.cpp''',
os.path.join('''cpu''' , '''ms_deform_attn_cpu.cpp''' ),
os.path.join('''cuda''' , '''ms_deform_attn_cuda.cu''' ),
]
]
load(
'''MultiScaleDeformableAttention''' , __A , with_cuda=__A , extra_include_paths=[str(__A )] , extra_cflags=['''-DWITH_CUDA=1'''] , extra_cuda_cflags=[
'''-DCUDA_HAS_FP16=1''',
'''-D__CUDA_NO_HALF_OPERATORS__''',
'''-D__CUDA_NO_HALF_CONVERSIONS__''',
'''-D__CUDA_NO_HALF2_OPERATORS__''',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 516 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[str] = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : int = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 516 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase_ ( __snake_case ):
_lowerCamelCase = ['image_processor', 'tokenizer']
_lowerCamelCase = 'Pix2StructImageProcessor'
_lowerCamelCase = ('T5Tokenizer', 'T5TokenizerFast')
def __init__( self , lowercase_ , lowercase_ ):
_snake_case : Tuple = False
super().__init__(lowercase_ , lowercase_ )
def __call__( self , lowercase_=None , lowercase_ = None , lowercase_ = True , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = 2_048 , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = True , lowercase_ = None , **lowercase_ , ):
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
_snake_case : List[Any] = self.tokenizer
_snake_case : int = self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_token_type_ids=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
_snake_case : Dict = self.image_processor(
lowercase_ , return_tensors=lowercase_ , max_patches=lowercase_ , **lowercase_ )
else:
# add pixel_values and bbox
_snake_case : Optional[Any] = self.image_processor(
lowercase_ , return_tensors=lowercase_ , max_patches=lowercase_ , header_text=lowercase_ , **lowercase_ )
if text is not None and not self.image_processor.is_vqa:
_snake_case : Union[str, Any] = self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_token_type_ids=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
if "attention_mask" in text_encoding:
_snake_case : str = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
_snake_case : List[str] = text_encoding.pop("input_ids" )
else:
_snake_case : Dict = None
if text_encoding is not None:
encoding_image_processor.update(lowercase_ )
return encoding_image_processor
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , *lowercase_ , **lowercase_ ):
return self.tokenizer.decode(*lowercase_ , **lowercase_ )
@property
def UpperCamelCase ( self ):
_snake_case : Any = self.tokenizer.model_input_names
_snake_case : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 670 | import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def snake_case (*__lowercase ) -> Dict:
'''simple docstring'''
if not isinstance(__lowercase , __lowercase ):
_snake_case : Dict = list(__lowercase )
for i in range(len(__lowercase ) ):
_snake_case : List[str] = None
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
return objects
def snake_case (__lowercase ) -> bool:
'''simple docstring'''
_snake_case : str = [
"CUDA out of memory.", # CUDA OOM
"cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.", # CUDNN SNAFU
"DefaultCPUAllocator: can't allocate memory", # CPU OOM
]
if isinstance(__lowercase , __lowercase ) and len(exception.args ) == 1:
return any(err in exception.args[0] for err in _statements )
return False
def snake_case (__lowercase = None , __lowercase = 128 ) -> Any:
'''simple docstring'''
if function is None:
return functools.partial(__lowercase , starting_batch_size=__lowercase )
_snake_case : List[str] = starting_batch_size
def decorator(*__lowercase , **__lowercase ):
nonlocal batch_size
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
_snake_case : Optional[Any] = list(inspect.signature(__lowercase ).parameters.keys() )
# Guard against user error
if len(__lowercase ) < (len(__lowercase ) + 1):
_snake_case : str = ", ".join([F"""{arg}={value}""" for arg, value in zip(params[1:] , args[1:] )] )
raise TypeError(
F"""Batch size was passed into `{function.__name__}` as the first argument when called."""
F"""Remove this as the decorator already does so: `{function.__name__}({arg_str})`""" )
while True:
if batch_size == 0:
raise RuntimeError("No executable batch size found, reached zero." )
try:
return function(__lowercase , *__lowercase , **__lowercase )
except Exception as e:
if should_reduce_batch_size(__lowercase ):
gc.collect()
if is_xpu_available():
torch.xpu.empty_cache()
elif is_npu_available():
torch.npu.empty_cache()
else:
torch.cuda.empty_cache()
batch_size //= 2
else:
raise
return decorator | 670 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class a_ ( _snake_case ):
UpperCamelCase__ : Any ="ctrl"
UpperCamelCase__ : Tuple =["past_key_values"]
UpperCamelCase__ : Tuple ={
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self :Dict , _lowercase :Dict=246534 , _lowercase :str=256 , _lowercase :int=1280 , _lowercase :Dict=8192 , _lowercase :Tuple=48 , _lowercase :List[str]=16 , _lowercase :Tuple=0.1 , _lowercase :int=0.1 , _lowercase :Union[str, Any]=1E-6 , _lowercase :Optional[Any]=0.02 , _lowercase :Tuple=True , **_lowercase :List[Any] , ) -> Any:
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = n_positions
UpperCAmelCase_ = n_embd
UpperCAmelCase_ = n_layer
UpperCAmelCase_ = n_head
UpperCAmelCase_ = dff
UpperCAmelCase_ = resid_pdrop
UpperCAmelCase_ = embd_pdrop
UpperCAmelCase_ = layer_norm_epsilon
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = use_cache
super().__init__(**_lowercase)
| 561 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class a_ ( _snake_case ):
UpperCamelCase__ : torch.FloatTensor
class a_ ( nn.Module ):
def __init__( self :Union[str, Any] , _lowercase :str=3 , _lowercase :List[str]=3 , _lowercase :Dict=("DownEncoderBlock2D",) , _lowercase :Optional[Any]=(64,) , _lowercase :Optional[Any]=2 , _lowercase :Tuple=32 , _lowercase :int="silu" , _lowercase :Union[str, Any]=True , ) -> Union[str, Any]:
super().__init__()
UpperCAmelCase_ = layers_per_block
UpperCAmelCase_ = torch.nn.Convad(
_lowercase , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase_ = None
UpperCAmelCase_ = nn.ModuleList([])
# down
UpperCAmelCase_ = block_out_channels[0]
for i, down_block_type in enumerate(_lowercase):
UpperCAmelCase_ = output_channel
UpperCAmelCase_ = block_out_channels[i]
UpperCAmelCase_ = i == len(_lowercase) - 1
UpperCAmelCase_ = get_down_block(
_lowercase , num_layers=self.layers_per_block , in_channels=_lowercase , out_channels=_lowercase , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=_lowercase , resnet_groups=_lowercase , attention_head_dim=_lowercase , temb_channels=_lowercase , )
self.down_blocks.append(_lowercase)
# mid
UpperCAmelCase_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_lowercase , output_scale_factor=1 , resnet_time_scale_shift='''default''' , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowercase , temb_channels=_lowercase , )
# out
UpperCAmelCase_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=_lowercase , eps=1E-6)
UpperCAmelCase_ = nn.SiLU()
UpperCAmelCase_ = 2 * out_channels if double_z else out_channels
UpperCAmelCase_ = nn.Convad(block_out_channels[-1] , _lowercase , 3 , padding=1)
UpperCAmelCase_ = False
def __a ( self :Any , _lowercase :int) -> Optional[Any]:
UpperCAmelCase_ = x
UpperCAmelCase_ = self.conv_in(_lowercase)
if self.training and self.gradient_checkpointing:
def create_custom_forward(_lowercase :Dict):
def custom_forward(*_lowercase :Any):
return module(*_lowercase)
return custom_forward
# down
if is_torch_version('''>=''' , '''1.11.0'''):
for down_block in self.down_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowercase) , _lowercase , use_reentrant=_lowercase)
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _lowercase , use_reentrant=_lowercase)
else:
for down_block in self.down_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowercase) , _lowercase)
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block) , _lowercase)
else:
# down
for down_block in self.down_blocks:
UpperCAmelCase_ = down_block(_lowercase)
# middle
UpperCAmelCase_ = self.mid_block(_lowercase)
# post-process
UpperCAmelCase_ = self.conv_norm_out(_lowercase)
UpperCAmelCase_ = self.conv_act(_lowercase)
UpperCAmelCase_ = self.conv_out(_lowercase)
return sample
class a_ ( nn.Module ):
def __init__( self :Union[str, Any] , _lowercase :Optional[Any]=3 , _lowercase :List[str]=3 , _lowercase :List[str]=("UpDecoderBlock2D",) , _lowercase :int=(64,) , _lowercase :Optional[Any]=2 , _lowercase :List[Any]=32 , _lowercase :Union[str, Any]="silu" , _lowercase :Optional[int]="group" , ) -> Any:
super().__init__()
UpperCAmelCase_ = layers_per_block
UpperCAmelCase_ = nn.Convad(
_lowercase , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
UpperCAmelCase_ = None
UpperCAmelCase_ = nn.ModuleList([])
UpperCAmelCase_ = in_channels if norm_type == '''spatial''' else None
# mid
UpperCAmelCase_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=_lowercase , output_scale_factor=1 , resnet_time_scale_shift='''default''' if norm_type == '''group''' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=_lowercase , temb_channels=_lowercase , )
# up
UpperCAmelCase_ = list(reversed(_lowercase))
UpperCAmelCase_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(_lowercase):
UpperCAmelCase_ = output_channel
UpperCAmelCase_ = reversed_block_out_channels[i]
UpperCAmelCase_ = i == len(_lowercase) - 1
UpperCAmelCase_ = get_up_block(
_lowercase , num_layers=self.layers_per_block + 1 , in_channels=_lowercase , out_channels=_lowercase , prev_output_channel=_lowercase , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=_lowercase , resnet_groups=_lowercase , attention_head_dim=_lowercase , temb_channels=_lowercase , resnet_time_scale_shift=_lowercase , )
self.up_blocks.append(_lowercase)
UpperCAmelCase_ = output_channel
# out
if norm_type == "spatial":
UpperCAmelCase_ = SpatialNorm(block_out_channels[0] , _lowercase)
else:
UpperCAmelCase_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=_lowercase , eps=1E-6)
UpperCAmelCase_ = nn.SiLU()
UpperCAmelCase_ = nn.Convad(block_out_channels[0] , _lowercase , 3 , padding=1)
UpperCAmelCase_ = False
def __a ( self :Union[str, Any] , _lowercase :Dict , _lowercase :List[Any]=None) -> Any:
UpperCAmelCase_ = z
UpperCAmelCase_ = self.conv_in(_lowercase)
UpperCAmelCase_ = next(iter(self.up_blocks.parameters())).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_lowercase :str):
def custom_forward(*_lowercase :Any):
return module(*_lowercase)
return custom_forward
if is_torch_version('''>=''' , '''1.11.0'''):
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _lowercase , _lowercase , use_reentrant=_lowercase)
UpperCAmelCase_ = sample.to(_lowercase)
# up
for up_block in self.up_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(_lowercase) , _lowercase , _lowercase , use_reentrant=_lowercase)
else:
# middle
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block) , _lowercase , _lowercase)
UpperCAmelCase_ = sample.to(_lowercase)
# up
for up_block in self.up_blocks:
UpperCAmelCase_ = torch.utils.checkpoint.checkpoint(create_custom_forward(_lowercase) , _lowercase , _lowercase)
else:
# middle
UpperCAmelCase_ = self.mid_block(_lowercase , _lowercase)
UpperCAmelCase_ = sample.to(_lowercase)
# up
for up_block in self.up_blocks:
UpperCAmelCase_ = up_block(_lowercase , _lowercase)
# post-process
if latent_embeds is None:
UpperCAmelCase_ = self.conv_norm_out(_lowercase)
else:
UpperCAmelCase_ = self.conv_norm_out(_lowercase , _lowercase)
UpperCAmelCase_ = self.conv_act(_lowercase)
UpperCAmelCase_ = self.conv_out(_lowercase)
return sample
class a_ ( nn.Module ):
def __init__( self :Union[str, Any] , _lowercase :Optional[int] , _lowercase :Dict , _lowercase :str , _lowercase :str=None , _lowercase :int="random" , _lowercase :Tuple=False , _lowercase :Tuple=True) -> Any:
super().__init__()
UpperCAmelCase_ = n_e
UpperCAmelCase_ = vq_embed_dim
UpperCAmelCase_ = beta
UpperCAmelCase_ = legacy
UpperCAmelCase_ = nn.Embedding(self.n_e , self.vq_embed_dim)
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e)
UpperCAmelCase_ = remap
if self.remap is not None:
self.register_buffer('''used''' , torch.tensor(np.load(self.remap)))
UpperCAmelCase_ = self.used.shape[0]
UpperCAmelCase_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
UpperCAmelCase_ = self.re_embed
UpperCAmelCase_ = self.re_embed + 1
print(
f"Remapping {self.n_e} indices to {self.re_embed} indices. "
f"Using {self.unknown_index} for unknown indices.")
else:
UpperCAmelCase_ = n_e
UpperCAmelCase_ = sane_index_shape
def __a ( self :Dict , _lowercase :Union[str, Any]) -> Tuple:
UpperCAmelCase_ = inds.shape
assert len(_lowercase) > 1
UpperCAmelCase_ = inds.reshape(ishape[0] , -1)
UpperCAmelCase_ = self.used.to(_lowercase)
UpperCAmelCase_ = (inds[:, :, None] == used[None, None, ...]).long()
UpperCAmelCase_ = match.argmax(-1)
UpperCAmelCase_ = match.sum(2) < 1
if self.unknown_index == "random":
UpperCAmelCase_ = torch.randint(0 , self.re_embed , size=new[unknown].shape).to(device=new.device)
else:
UpperCAmelCase_ = self.unknown_index
return new.reshape(_lowercase)
def __a ( self :str , _lowercase :int) -> Optional[Any]:
UpperCAmelCase_ = inds.shape
assert len(_lowercase) > 1
UpperCAmelCase_ = inds.reshape(ishape[0] , -1)
UpperCAmelCase_ = self.used.to(_lowercase)
if self.re_embed > self.used.shape[0]: # extra token
UpperCAmelCase_ = 0 # simply set to zero
UpperCAmelCase_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , _lowercase)
return back.reshape(_lowercase)
def __a ( self :Optional[int] , _lowercase :Union[str, Any]) -> Any:
# reshape z -> (batch, height, width, channel) and flatten
UpperCAmelCase_ = z.permute(0 , 2 , 3 , 1).contiguous()
UpperCAmelCase_ = z.view(-1 , self.vq_embed_dim)
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
UpperCAmelCase_ = torch.argmin(torch.cdist(_lowercase , self.embedding.weight) , dim=1)
UpperCAmelCase_ = self.embedding(_lowercase).view(z.shape)
UpperCAmelCase_ = None
UpperCAmelCase_ = None
# compute loss for embedding
if not self.legacy:
UpperCAmelCase_ = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
else:
UpperCAmelCase_ = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
# preserve gradients
UpperCAmelCase_ = z + (z_q - z).detach()
# reshape back to match original input shape
UpperCAmelCase_ = z_q.permute(0 , 3 , 1 , 2).contiguous()
if self.remap is not None:
UpperCAmelCase_ = min_encoding_indices.reshape(z.shape[0] , -1) # add batch axis
UpperCAmelCase_ = self.remap_to_used(_lowercase)
UpperCAmelCase_ = min_encoding_indices.reshape(-1 , 1) # flatten
if self.sane_index_shape:
UpperCAmelCase_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3])
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __a ( self :Any , _lowercase :Tuple , _lowercase :Optional[Any]) -> int:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
UpperCAmelCase_ = indices.reshape(shape[0] , -1) # add batch axis
UpperCAmelCase_ = self.unmap_to_all(_lowercase)
UpperCAmelCase_ = indices.reshape(-1) # flatten again
# get quantized latent vectors
UpperCAmelCase_ = self.embedding(_lowercase)
if shape is not None:
UpperCAmelCase_ = z_q.view(_lowercase)
# reshape back to match original input shape
UpperCAmelCase_ = z_q.permute(0 , 3 , 1 , 2).contiguous()
return z_q
class a_ ( _snake_case ):
def __init__( self :Tuple , _lowercase :List[str] , _lowercase :Union[str, Any]=False) -> List[Any]:
UpperCAmelCase_ = parameters
UpperCAmelCase_ , UpperCAmelCase_ = torch.chunk(_lowercase , 2 , dim=1)
UpperCAmelCase_ = torch.clamp(self.logvar , -30.0 , 20.0)
UpperCAmelCase_ = deterministic
UpperCAmelCase_ = torch.exp(0.5 * self.logvar)
UpperCAmelCase_ = torch.exp(self.logvar)
if self.deterministic:
UpperCAmelCase_ = UpperCAmelCase_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype)
def __a ( self :Optional[Any] , _lowercase :Optional[torch.Generator] = None) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
UpperCAmelCase_ = randn_tensor(
self.mean.shape , generator=_lowercase , device=self.parameters.device , dtype=self.parameters.dtype)
UpperCAmelCase_ = self.mean + self.std * sample
return x
def __a ( self :Tuple , _lowercase :int=None) -> List[Any]:
if self.deterministic:
return torch.Tensor([0.0])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2) + self.var - 1.0 - self.logvar , dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __a ( self :Optional[int] , _lowercase :str , _lowercase :Dict=[1, 2, 3]) -> Optional[Any]:
if self.deterministic:
return torch.Tensor([0.0])
UpperCAmelCase_ = np.log(2.0 * np.pi)
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2) / self.var , dim=_lowercase)
def __a ( self :Optional[Any]) -> Optional[int]:
return self.mean
| 561 | 1 |
'''simple docstring'''
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Any):
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCAmelCase , scheduler=_lowerCAmelCase)
@torch.no_grad()
def __call__( self : Union[str, Any] , _lowerCAmelCase : int = 1 , _lowerCAmelCase : Optional[torch.Generator] = None , _lowerCAmelCase : int = 5_0 , _lowerCAmelCase : Optional[str] = "pil" , _lowerCAmelCase : bool = True , **_lowerCAmelCase : Optional[int] , ):
'''simple docstring'''
__lowercase =torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_lowerCAmelCase , )
__lowercase =image.to(self.device)
# set step values
self.scheduler.set_timesteps(_lowerCAmelCase)
for t in self.progress_bar(self.scheduler.timesteps):
# 1. predict noise model_output
__lowercase =self.unet(_lowerCAmelCase , _lowerCAmelCase).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__lowercase =self.scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase).prev_sample
__lowercase =(image / 2 + 0.5).clamp(0 , 1)
__lowercase =image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__lowercase =self.numpy_to_pil(_lowerCAmelCase)
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=_lowerCAmelCase), "This is a local test"
| 474 |
'''simple docstring'''
import os
# Precomputes a list of the 100 first triangular numbers
lowerCamelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def _A ( ):
"""simple docstring"""
__lowercase =os.path.dirname(os.path.realpath(_lowerCAmelCase ) )
__lowercase =os.path.join(_lowerCAmelCase , 'words.txt' )
__lowercase =''
with open(_lowerCAmelCase ) as f:
__lowercase =f.readline()
__lowercase =[word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
__lowercase =[
word
for word in [sum(ord(_lowerCAmelCase ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(_lowerCAmelCase )
if __name__ == "__main__":
print(solution())
| 474 | 1 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def lowerCamelCase_ ( A_ , A_ , A_ , A_ , A_ ):
# Load configuration defined in the metadata file
with open(A_ ) as metadata_file:
__lowerCamelCase = json.load(A_ )
__lowerCamelCase = LukeConfig(use_entity_aware_attention=A_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
__lowerCamelCase = torch.load(A_ , map_location='''cpu''' )['''module''']
# Load the entity vocab file
__lowerCamelCase = load_original_entity_vocab(A_ )
# add an entry for [MASK2]
__lowerCamelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
__lowerCamelCase = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
__lowerCamelCase = AddedToken('''<ent>''' , lstrip=A_ , rstrip=A_ )
__lowerCamelCase = AddedToken('''<ent2>''' , lstrip=A_ , rstrip=A_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'''Saving tokenizer to {pytorch_dump_folder_path}''' )
tokenizer.save_pretrained(A_ )
with open(os.path.join(A_ , '''tokenizer_config.json''' ) , '''r''' ) as f:
__lowerCamelCase = json.load(A_ )
__lowerCamelCase = '''MLukeTokenizer'''
with open(os.path.join(A_ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(A_ , A_ )
with open(os.path.join(A_ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(A_ , A_ )
__lowerCamelCase = MLukeTokenizer.from_pretrained(A_ )
# Initialize the embeddings of the special tokens
__lowerCamelCase = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
__lowerCamelCase = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
__lowerCamelCase = state_dict['''embeddings.word_embeddings.weight''']
__lowerCamelCase = word_emb[ent_init_index].unsqueeze(0 )
__lowerCamelCase = word_emb[enta_init_index].unsqueeze(0 )
__lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
__lowerCamelCase = state_dict[bias_name]
__lowerCamelCase = decoder_bias[ent_init_index].unsqueeze(0 )
__lowerCamelCase = decoder_bias[enta_init_index].unsqueeze(0 )
__lowerCamelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
__lowerCamelCase = f'''encoder.layer.{layer_index}.attention.self.'''
__lowerCamelCase = state_dict[prefix + matrix_name]
__lowerCamelCase = state_dict[prefix + matrix_name]
__lowerCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
__lowerCamelCase = state_dict['''entity_embeddings.entity_embeddings.weight''']
__lowerCamelCase = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
__lowerCamelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
__lowerCamelCase = state_dict['''entity_predictions.bias''']
__lowerCamelCase = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
__lowerCamelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
__lowerCamelCase = LukeForMaskedLM(config=A_ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
__lowerCamelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
__lowerCamelCase = state_dict[key]
else:
__lowerCamelCase = state_dict[key]
__lowerCamelCase , __lowerCamelCase = model.load_state_dict(A_ , strict=A_ )
if set(A_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'''Unexpected unexpected_keys: {unexpected_keys}''' )
if set(A_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'''Unexpected missing_keys: {missing_keys}''' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
__lowerCamelCase = MLukeTokenizer.from_pretrained(A_ , task='''entity_classification''' )
__lowerCamelCase = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
__lowerCamelCase = (0, 9)
__lowerCamelCase = tokenizer(A_ , entity_spans=[span] , return_tensors='''pt''' )
__lowerCamelCase = model(**A_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__lowerCamelCase = torch.Size((1, 33, 7_68) )
__lowerCamelCase = torch.tensor([[0.0_892, 0.0_596, -0.2_819], [0.0_134, 0.1_199, 0.0_573], [-0.0_169, 0.0_927, 0.0_644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}''' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , A_ , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
__lowerCamelCase = torch.Size((1, 1, 7_68) )
__lowerCamelCase = torch.tensor([[-0.1_482, 0.0_609, 0.0_322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'''Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'''
f''' {expected_shape}''' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , A_ , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
__lowerCamelCase = MLukeTokenizer.from_pretrained(A_ )
__lowerCamelCase = '''Tokyo is the capital of <mask>.'''
__lowerCamelCase = (24, 30)
__lowerCamelCase = tokenizer(A_ , entity_spans=[span] , return_tensors='''pt''' )
__lowerCamelCase = model(**A_ )
__lowerCamelCase = encoding['''input_ids'''][0].tolist()
__lowerCamelCase = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
__lowerCamelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(A_ )
__lowerCamelCase = outputs.entity_logits[0][0].argmax().item()
__lowerCamelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(A_ ) )
model.save_pretrained(A_ )
def lowerCamelCase_ ( A_ ):
__lowerCamelCase = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
__lowerCamelCase = [json.loads(A_ ) for line in open(A_ )]
__lowerCamelCase = {}
for entry in data:
__lowerCamelCase = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
__lowerCamelCase = entity_id
break
__lowerCamelCase = f'''{language}:{entity_name}'''
__lowerCamelCase = entity_id
return new_mapping
if __name__ == "__main__":
_UpperCamelCase : Union[str, Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
_UpperCamelCase : List[Any] =parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 720 |
'''simple docstring'''
from statistics import mean
import numpy as np
def lowerCamelCase_ ( A_ , A_ , A_ , A_ ):
__lowerCamelCase = 0
# Number of processes finished
__lowerCamelCase = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
__lowerCamelCase = [0] * no_of_process
# List to include calculation results
__lowerCamelCase = [0] * no_of_process
# Sort by arrival time.
__lowerCamelCase = [burst_time[i] for i in np.argsort(A_ )]
__lowerCamelCase = [process_name[i] for i in np.argsort(A_ )]
arrival_time.sort()
while no_of_process > finished_process_count:
__lowerCamelCase = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
__lowerCamelCase = arrival_time[i]
__lowerCamelCase = 0
# Index showing the location of the process being performed
__lowerCamelCase = 0
# Saves the current response ratio.
__lowerCamelCase = 0
for i in range(0 , A_ ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
__lowerCamelCase = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
__lowerCamelCase = temp
__lowerCamelCase = i
# Calculate the turn around time
__lowerCamelCase = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
__lowerCamelCase = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def lowerCamelCase_ ( A_ , A_ , A_ , A_ ):
__lowerCamelCase = [0] * no_of_process
for i in range(0 , A_ ):
__lowerCamelCase = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
_UpperCamelCase : List[Any] =5
_UpperCamelCase : str =["A", "B", "C", "D", "E"]
_UpperCamelCase : int =[1, 2, 3, 4, 5]
_UpperCamelCase : Tuple =[1, 2, 3, 4, 5]
_UpperCamelCase : int =calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
_UpperCamelCase : Tuple =calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
| 575 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class A__ :
def __init__( self : Dict , _a : Optional[Any] , _a : Optional[int]=13 , _a : List[Any]=7 , _a : Dict=True , _a : Any=True , _a : int=True , _a : int=True , _a : Any=99 , _a : str=32 , _a : str=2 , _a : Optional[Any]=4 , _a : Optional[int]=37 , _a : Union[str, Any]="gelu" , _a : Dict=0.1 , _a : Optional[Any]=0.1 , _a : Optional[Any]=512 , _a : List[str]=16 , _a : Optional[int]=2 , _a : Optional[Any]=0.02 , _a : List[Any]=3 , _a : List[Any]=4 , _a : Optional[Any]=None , _a : Any=0 , ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =seq_length
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_input_mask
_SCREAMING_SNAKE_CASE =use_token_type_ids
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =type_vocab_size
_SCREAMING_SNAKE_CASE =type_sequence_label_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =num_choices
_SCREAMING_SNAKE_CASE =scope
_SCREAMING_SNAKE_CASE =projection_dim
def A ( self : Any ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
_SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE =None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE =BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , )
_SCREAMING_SNAKE_CASE =DPRConfig(projection_dim=self.projection_dim , **config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : Dict , _a : Optional[int] , _a : Optional[Any] , _a : Optional[int] , _a : Optional[Any] , _a : Any , _a : Any , _a : Optional[int] ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFDPRContextEncoder(config=A_ )
_SCREAMING_SNAKE_CASE =model(A_ , attention_mask=A_ , token_type_ids=A_ )
_SCREAMING_SNAKE_CASE =model(A_ , token_type_ids=A_ )
_SCREAMING_SNAKE_CASE =model(A_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def A ( self : Any , _a : List[str] , _a : int , _a : List[str] , _a : List[Any] , _a : str , _a : Tuple , _a : List[Any] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFDPRQuestionEncoder(config=A_ )
_SCREAMING_SNAKE_CASE =model(A_ , attention_mask=A_ , token_type_ids=A_ )
_SCREAMING_SNAKE_CASE =model(A_ , token_type_ids=A_ )
_SCREAMING_SNAKE_CASE =model(A_ )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) )
def A ( self : Optional[Any] , _a : List[str] , _a : List[Any] , _a : Optional[int] , _a : Optional[int] , _a : str , _a : Union[str, Any] , _a : int ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFDPRReader(config=A_ )
_SCREAMING_SNAKE_CASE =model(A_ , attention_mask=A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) )
def A ( self : str ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
(
(
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) , (
_SCREAMING_SNAKE_CASE
) ,
) =config_and_inputs
_SCREAMING_SNAKE_CASE ={'input_ids': input_ids}
return config, inputs_dict
@require_tf
class A__ ( A__ , A__ , unittest.TestCase ):
A__ = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
A__ = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
A__ = False
A__ = False
A__ = False
A__ = False
A__ = False
def A ( self : Dict ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFDPRModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=A_ , hidden_size=37 )
def A ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : int ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*A_ )
def A ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*A_ )
def A ( self : Union[str, Any] ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*A_ )
@slow
def A ( self : str ) -> Union[str, Any]:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =TFDPRContextEncoder.from_pretrained(A_ )
self.assertIsNotNone(A_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =TFDPRContextEncoder.from_pretrained(A_ )
self.assertIsNotNone(A_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =TFDPRQuestionEncoder.from_pretrained(A_ )
self.assertIsNotNone(A_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =TFDPRReader.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
class A__ ( unittest.TestCase ):
@slow
def A ( self : int ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
_SCREAMING_SNAKE_CASE =tf.constant(
[[101, 7592, 1010, 2003, 2026, 3899, 1_0140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP]
_SCREAMING_SNAKE_CASE =model(A_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
_SCREAMING_SNAKE_CASE =tf.constant(
[
[
0.03_23_62_53,
0.12_75_33_35,
0.16_81_85_09,
0.00_27_97_86,
0.3_89_69_33,
0.24_26_49_45,
0.2_17_89_71,
-0.02_33_52_27,
-0.08_48_19_59,
-0.14_32_41_17,
]
] )
self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 405 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : Dict , A_ : int=7 , A_ : Any=3 , A_ : List[str]=30 , A_ : Union[str, Any]=400 , A_ : List[str]=True , A_ : int=None , A_ : Any=True , A_ : str=1 / 255 , A_ : int=True , A_ : List[Any]=[0.5, 0.5, 0.5] , A_ : Union[str, Any]=[0.5, 0.5, 0.5] , A_ : Union[str, Any]=True , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = min_resolution
lowerCamelCase_ = max_resolution
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean
lowerCamelCase_ = image_std
lowerCamelCase_ = do_pad
def a__ ( self : Tuple ) -> Dict:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def a__ ( self : Union[str, Any] , A_ : Dict , A_ : Any=False ) -> Union[str, Any]:
"""simple docstring"""
if not batched:
lowerCamelCase_ = image_inputs[0]
if isinstance(A_ , Image.Image ):
lowerCamelCase_ , lowerCamelCase_ = image.size
else:
lowerCamelCase_ , lowerCamelCase_ = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase_ = int(self.size['shortest_edge'] * h / w )
lowerCamelCase_ = self.size['shortest_edge']
elif w > h:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = int(self.size['shortest_edge'] * w / h )
else:
lowerCamelCase_ = self.size['shortest_edge']
lowerCamelCase_ = self.size['shortest_edge']
else:
lowerCamelCase_ = []
for image in image_inputs:
lowerCamelCase_ , lowerCamelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_ = max(A_ , key=lambda A_ : item[0] )[0]
lowerCamelCase_ = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = DetrImageProcessor if is_vision_available() else None
def a__ ( self : List[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_ = DetrImageProcessingTester(self )
@property
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : Dict ) -> int:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'image_mean' ) )
self.assertTrue(hasattr(A_ , 'image_std' ) )
self.assertTrue(hasattr(A_ , 'do_normalize' ) )
self.assertTrue(hasattr(A_ , 'do_rescale' ) )
self.assertTrue(hasattr(A_ , 'rescale_factor' ) )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'do_pad' ) )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad , A_ )
lowerCamelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , A_ )
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
pass
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
lowerCamelCase_ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_ = image_processing(A_ , return_tensors='pt' ).pixel_values
lowerCamelCase_ , lowerCamelCase_ = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'image_id': 39769, 'annotations': target}
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
lowerCamelCase_ = image_processing(images=A_ , annotations=A_ , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
@slow
def a__ ( self : str ) -> Any:
"""simple docstring"""
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
lowerCamelCase_ = json.loads(f.read() )
lowerCamelCase_ = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
lowerCamelCase_ = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
lowerCamelCase_ = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
lowerCamelCase_ = image_processing(images=A_ , annotations=A_ , masks_path=A_ , return_tensors='pt' )
# verify pixel values
lowerCamelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , A_ , atol=1E-4 ) )
# verify area
lowerCamelCase_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , A_ ) )
# verify boxes
lowerCamelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , A_ )
lowerCamelCase_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , A_ , atol=1E-3 ) )
# verify image_id
lowerCamelCase_ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , A_ ) )
# verify is_crowd
lowerCamelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , A_ ) )
# verify class_labels
lowerCamelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , A_ ) )
# verify masks
lowerCamelCase_ = 822873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , A_ )
# verify orig_size
lowerCamelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , A_ ) )
# verify size
lowerCamelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , A_ ) )
| 70 | 0 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __UpperCamelCase ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_lowercase : Optional[Any] = LxmertTokenizer
_lowercase : Dict = LxmertTokenizerFast
_lowercase : List[str] = True
_lowercase : Union[str, Any] = True
def _SCREAMING_SNAKE_CASE ( self: Tuple ):
'''simple docstring'''
super().setUp()
__magic_name__ = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self: Dict , __UpperCamelCase: int ):
'''simple docstring'''
__magic_name__ = 'UNwant\u00E9d,running'
__magic_name__ = 'unwanted, running'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self: Dict ):
'''simple docstring'''
__magic_name__ = self.tokenizer_class(self.vocab_file )
__magic_name__ = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase , ['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_rust_tokenizer()
__magic_name__ = 'I was born in 92000, and this is falsé.'
__magic_name__ = tokenizer.tokenize(_UpperCAmelCase )
__magic_name__ = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__magic_name__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
__magic_name__ = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
__magic_name__ = self.get_rust_tokenizer()
__magic_name__ = tokenizer.encode(_UpperCAmelCase )
__magic_name__ = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
| 714 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __UpperCamelCase ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
'''simple docstring'''
__magic_name__ = 'laion/clap-htsat-unfused'
__magic_name__ = tempfile.mkdtemp()
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , **__UpperCamelCase: List[Any] ):
'''simple docstring'''
return RobertaTokenizer.from_pretrained(self.checkpoint , **__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: int , **__UpperCamelCase: Dict ):
'''simple docstring'''
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
'''simple docstring'''
__magic_name__ = self.get_tokenizer()
__magic_name__ = self.get_feature_extractor()
__magic_name__ = ClapProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
__magic_name__ = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
'''simple docstring'''
__magic_name__ = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__magic_name__ = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__magic_name__ = self.get_feature_extractor(do_normalize=__UpperCamelCase , padding_value=1.0 )
__magic_name__ = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCamelCase )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: List[str] ):
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = ClapProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__magic_name__ = floats_list((3, 10_00) )
__magic_name__ = feature_extractor(__UpperCamelCase , return_tensors='np' )
__magic_name__ = processor(audios=__UpperCamelCase , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _SCREAMING_SNAKE_CASE ( self: List[Any] ):
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = ClapProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__magic_name__ = 'This is a test string'
__magic_name__ = processor(text=__UpperCamelCase )
__magic_name__ = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self: Optional[Any] ):
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = ClapProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
__magic_name__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__magic_name__ = processor.batch_decode(__UpperCamelCase )
__magic_name__ = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self: Any ):
'''simple docstring'''
__magic_name__ = self.get_feature_extractor()
__magic_name__ = self.get_tokenizer()
__magic_name__ = ClapProcessor(tokenizer=__UpperCamelCase , feature_extractor=__UpperCamelCase )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 184 | 0 |
"""simple docstring"""
from math import pow
def __snake_case ( _lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase ,):
"""simple docstring"""
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
UpperCamelCase = int(pow(_lowercase ,_lowercase ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
UpperCamelCase , UpperCamelCase = backtrack(
_lowercase ,_lowercase ,current_number + 1 ,_lowercase ,_lowercase )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
UpperCamelCase , UpperCamelCase = backtrack(
_lowercase ,_lowercase ,current_number + 1 ,_lowercase ,_lowercase )
return current_sum, solutions_count
def __snake_case ( _lowercase ,_lowercase ):
"""simple docstring"""
if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
raise ValueError(
'''Invalid input\n'''
'''needed_sum must be between 1 and 1000, power between 2 and 10.''' )
return backtrack(_lowercase ,_lowercase ,1 ,0 ,0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod() | 34 | import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def A__ ( ):
SCREAMING_SNAKE_CASE__: Union[str, Any]= argparse.ArgumentParser()
parser.add_argument(
'''-m''' , '''--pretrained_model_name_or_path''' , type=snake_case_ , default=snake_case_ , required=snake_case_ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
parser.add_argument(
'''-c''' , '''--caption''' , type=snake_case_ , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
parser.add_argument(
'''-n''' , '''--images_num''' , type=snake_case_ , default=4 , help='''How much images to generate.''' , )
parser.add_argument(
'''-s''' , '''--seed''' , type=snake_case_ , default=42 , help='''Seed for random process.''' , )
parser.add_argument(
'''-ci''' , '''--cuda_id''' , type=snake_case_ , default=0 , help='''cuda_id.''' , )
SCREAMING_SNAKE_CASE__: Any= parser.parse_args()
return args
def A__ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
if not len(snake_case_ ) == rows * cols:
raise ValueError('''The specified number of rows and columns are not correct.''' )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: str= imgs[0].size
SCREAMING_SNAKE_CASE__: Optional[Any]= Image.new('''RGB''' , size=(cols * w, rows * h) )
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= grid.size
for i, img in enumerate(snake_case_ ):
grid.paste(snake_case_ , box=(i % cols * w, i // cols * h) )
return grid
def A__ ( snake_case_ : Tuple , snake_case_ : str="robotic cat with wings" , snake_case_ : Optional[Any]=7.5 , snake_case_ : Dict=50 , snake_case_ : Union[str, Any]=1 , snake_case_ : Tuple=42 , ):
SCREAMING_SNAKE_CASE__: List[Any]= torch.Generator(pipeline.device ).manual_seed(snake_case_ )
SCREAMING_SNAKE_CASE__: Optional[int]= pipeline(
snake_case_ , guidance_scale=snake_case_ , num_inference_steps=snake_case_ , generator=snake_case_ , num_images_per_prompt=snake_case_ , ).images
SCREAMING_SNAKE_CASE__: str= int(math.sqrt(snake_case_ ) )
SCREAMING_SNAKE_CASE__: Optional[Any]= image_grid(snake_case_ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
lowercase_ : List[str] = parse_args()
# Load models and create wrapper for stable diffusion
lowercase_ : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowercase_ : List[Any] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowercase_ : Tuple = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowercase_ : List[Any] = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowercase_ : Dict = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
lowercase_ : str = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
lowercase_ : Union[str, Any] = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
lowercase_ : Any = unet.to(torch.device('cuda', args.cuda_id))
lowercase_ : str = pipeline.to(unet.device)
lowercase_ , lowercase_ : Dict = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowercase_ : List[Any] = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 64 | 0 |
'''simple docstring'''
import os
def A_ ( ):
SCREAMING_SNAKE_CASE:Dict = os.path.join(os.path.dirname(snake_case ) , "num.txt" )
with open(snake_case ) as file_hand:
return str(sum(int(snake_case ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 465 |
'''simple docstring'''
import random
def A_ ( snake_case , snake_case , snake_case = False ):
SCREAMING_SNAKE_CASE:dict = {i: [] for i in range(snake_case )}
# if probability is greater or equal than 1, then generate a complete graph
if probability >= 1:
return complete_graph(snake_case )
# if probability is lower or equal than 0, then return a graph without edges
if probability <= 0:
return graph
# for each couple of nodes, add an edge from u to v
# if the number randomly generated is greater than probability probability
for i in range(snake_case ):
for j in range(i + 1 , snake_case ):
if random.random() < probability:
graph[i].append(snake_case )
if not directed:
# if the graph is undirected, add an edge in from j to i, either
graph[j].append(snake_case )
return graph
def A_ ( snake_case ):
return {
i: [j for j in range(snake_case ) if i != j] for i in range(snake_case )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
| 465 | 1 |