code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
lowerCAmelCase__ : Optional[Union[str, Path]] = None
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : Optional[Dict] = None
lowerCAmelCase__ : Optional[str] = None
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : bool = True
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : int = 1
lowerCAmelCase__ : Optional[Union[str, bool]] = None
lowerCAmelCase__ : bool = False
lowerCAmelCase__ : Optional[Dict] = None
lowerCAmelCase__ : Optional[str] = None
def _UpperCAmelCase ( self: Any ) -> "DownloadConfig":
'''simple docstring'''
return self.__class__(**{k: copy.deepcopy(__lowerCAmelCase ) for k, v in self.__dict__.items()} )
| 221 | import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
a_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
a_ = 250004
a_ = 250020
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = MBartaaTokenizer
lowerCAmelCase__ : List[Any] = MBartaaTokenizerFast
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Optional[Any] = True
def _UpperCAmelCase ( self: int ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase = MBartaaTokenizer(__lowerCAmelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self: Dict ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = "<s>"
__UpperCAmelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def _UpperCAmelCase ( self: List[str] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__lowerCAmelCase ) , 1_054 )
def _UpperCAmelCase ( self: Any ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def _UpperCAmelCase ( self: Optional[Any] ) -> Any:
'''simple docstring'''
__UpperCAmelCase = MBartaaTokenizer(__lowerCAmelCase , src_lang="en_XX" , tgt_lang="ro_RO" , keep_accents=__lowerCAmelCase )
__UpperCAmelCase = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowerCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__UpperCAmelCase = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowerCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
__UpperCAmelCase = tokenizer.convert_tokens_to_ids(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__UpperCAmelCase = tokenizer.convert_ids_to_tokens(__lowerCAmelCase )
self.assertListEqual(
__lowerCAmelCase , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def _UpperCAmelCase ( self: List[str] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = {"input_ids": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__UpperCAmelCase = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__UpperCAmelCase = self.tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase )
__UpperCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
__UpperCAmelCase = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase )
__UpperCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=True
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
__UpperCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it save with the same files
self.assertSequenceEqual(__lowerCAmelCase , __lowerCAmelCase )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase )
__UpperCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
# Save tokenizer rust, legacy_format=False
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = tokenizer_r.save_pretrained(__lowerCAmelCase , legacy_format=__lowerCAmelCase )
__UpperCAmelCase = tokenizer_p.save_pretrained(__lowerCAmelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__UpperCAmelCase = tokenizer_r.from_pretrained(__lowerCAmelCase )
__UpperCAmelCase = tokenizer_p.from_pretrained(__lowerCAmelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__lowerCAmelCase , __lowerCAmelCase ) )
shutil.rmtree(__lowerCAmelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : Dict = 'facebook/mbart-large-50-one-to-many-mmt'
lowerCAmelCase__ : Optional[int] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowerCAmelCase__ : Union[str, Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowerCAmelCase__ : Optional[int] = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2]
@classmethod
def _UpperCAmelCase ( cls: Any ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
__UpperCAmelCase = 1
return cls
def _UpperCAmelCase ( self: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"] , 250_038 )
def _UpperCAmelCase ( self: Any ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
def _UpperCAmelCase ( self: str ) -> Any:
'''simple docstring'''
self.assertIn(__lowerCAmelCase , self.tokenizer.all_special_ids )
__UpperCAmelCase = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__UpperCAmelCase = self.tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase )
__UpperCAmelCase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , __lowerCAmelCase )
def _UpperCAmelCase ( self: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __lowerCAmelCase )
__UpperCAmelCase = 10
__UpperCAmelCase = self.tokenizer(__lowerCAmelCase , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase ).input_ids[0]
self.assertEqual(ids[0] , __lowerCAmelCase )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(__lowerCAmelCase ) , __lowerCAmelCase )
def _UpperCAmelCase ( self: int ) -> Dict:
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250_053, 250_001] )
def _UpperCAmelCase ( self: str ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = tempfile.mkdtemp()
__UpperCAmelCase = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__lowerCAmelCase )
__UpperCAmelCase = MBartaaTokenizer.from_pretrained(__lowerCAmelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __lowerCAmelCase )
@require_torch
def _UpperCAmelCase ( self: Any ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , return_tensors="pt" )
__UpperCAmelCase = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _UpperCAmelCase ( self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
__UpperCAmelCase = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __lowerCAmelCase )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer(self.src_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=3 , return_tensors="pt" )
__UpperCAmelCase = self.tokenizer(
text_target=self.tgt_text , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=10 , return_tensors="pt" )
__UpperCAmelCase = targets["input_ids"]
__UpperCAmelCase = shift_tokens_right(__lowerCAmelCase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _UpperCAmelCase ( self: str ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) , {
# en_XX, A, test, EOS
"input_ids": [[250_004, 62, 3_034, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250_001,
} , )
| 221 | 1 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase__ ='src/diffusers'
lowercase__ ='.'
# This is to make sure the diffusers module imported is the one in the repo.
lowercase__ =importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase__ =spec.loader.load_module()
def UpperCamelCase_ ( A__ , A__ ):
return line.startswith(A__ ) or len(A__ ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" , A__ ) is not None
def UpperCamelCase_ ( A__ ):
a_ = object_name.split(""".""" )
a_ = 0
# First let's find the module where our object lives.
a_ = parts[i]
while i < len(A__ ) and not os.path.isfile(os.path.join(A__ , F'''{module}.py''' ) ):
i += 1
if i < len(A__ ):
a_ = os.path.join(A__ , parts[i] )
if i >= len(A__ ):
raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' )
with open(os.path.join(A__ , F'''{module}.py''' ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
a_ = f.readlines()
# Now let's find the class / func in the code!
a_ = """"""
a_ = 0
for name in parts[i + 1 :]:
while (
line_index < len(A__ ) and re.search(rF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(A__ ):
raise ValueError(F''' {object_name} does not match any function or class in {module}.''' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
a_ = line_index
while line_index < len(A__ ) and _should_continue(lines[line_index] , A__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a_ = lines[start_index:line_index]
return "".join(A__ )
lowercase__ =re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
lowercase__ =re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
lowercase__ =re.compile(r'<FILL\s+[^>]*>')
def UpperCamelCase_ ( A__ ):
a_ = code.split("""\n""" )
a_ = 0
while idx < len(A__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(A__ ):
return re.search(r"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def UpperCamelCase_ ( A__ ):
a_ = len(get_indent(A__ ) ) > 0
if has_indent:
a_ = F'''class Bla:\n{code}'''
a_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=A__ )
a_ = black.format_str(A__ , mode=A__ )
a_ , a_ = style_docstrings_in_code(A__ )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def UpperCamelCase_ ( A__ , A__=False ):
with open(A__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
a_ = f.readlines()
a_ = []
a_ = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(A__ ):
a_ = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
a_ , a_ , a_ = search.groups()
a_ = find_code_in_diffusers(A__ )
a_ = get_indent(A__ )
a_ = line_index + 1 if indent == theoretical_indent else line_index + 2
a_ = theoretical_indent
a_ = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
a_ = True
while line_index < len(A__ ) and should_continue:
line_index += 1
if line_index >= len(A__ ):
break
a_ = lines[line_index]
a_ = _should_continue(A__ , A__ ) and re.search(F'''^{indent}# End copy''' , A__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
a_ = lines[start_index:line_index]
a_ = """""".join(A__ )
# Remove any nested `Copied from` comments to avoid circular copies
a_ = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(A__ ) is None]
a_ = """\n""".join(A__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(A__ ) > 0:
a_ = replace_pattern.replace("""with""" , """""" ).split(""",""" )
a_ = [_re_replace_pattern.search(A__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
a_ , a_ , a_ = pattern.groups()
a_ = re.sub(A__ , A__ , A__ )
if option.strip() == "all-casing":
a_ = re.sub(obja.lower() , obja.lower() , A__ )
a_ = re.sub(obja.upper() , obja.upper() , A__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
a_ = blackify(lines[start_index - 1] + theoretical_code )
a_ = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
a_ = lines[:start_index] + [theoretical_code] + lines[line_index:]
a_ = start_index + 1
if overwrite and len(A__ ) > 0:
# Warn the user a file has been modified.
print(F'''Detected changes, rewriting {filename}.''' )
with open(A__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(A__ )
return diffs
def UpperCamelCase_ ( A__ = False ):
a_ = glob.glob(os.path.join(A__ , """**/*.py""" ) , recursive=A__ )
a_ = []
for filename in all_files:
a_ = is_copy_consistent(A__ , A__ )
diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(A__ ) > 0:
a_ = """\n""".join(A__ )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
lowercase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowercase__ =parser.parse_args()
check_copies(args.fix_and_overwrite)
| 511 |
'''simple docstring'''
def UpperCamelCase_ ( A__ ):
if n_term == "":
return []
a_ = []
for temp in range(int(A__ ) ):
series.append(F'''1/{temp + 1}''' if series else """1""" )
return series
if __name__ == "__main__":
lowercase__ =input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
| 511 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a__ : Any = logging.get_logger(__name__)
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = ['''pixel_values''']
def __init__( self , lowercase = True , lowercase = None , lowercase = 0.9 , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = 1 / 2_5_5 , lowercase = True , lowercase = True , lowercase = None , lowercase = None , **lowercase , ) -> None:
super().__init__(**lowercase )
__UpperCamelCase = size if size is not None else {"""shortest_edge""": 2_2_4}
__UpperCamelCase = get_size_dict(lowercase , default_to_square=lowercase )
__UpperCamelCase = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
__UpperCamelCase = get_size_dict(lowercase , param_name="""crop_size""" )
__UpperCamelCase = do_resize
__UpperCamelCase = size
__UpperCamelCase = crop_pct
__UpperCamelCase = resample
__UpperCamelCase = do_center_crop
__UpperCamelCase = crop_size
__UpperCamelCase = do_rescale
__UpperCamelCase = rescale_factor
__UpperCamelCase = do_normalize
__UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCamelCase ( self , lowercase , lowercase , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray:
__UpperCamelCase = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
if crop_pct is not None:
if "shortest_edge" in size:
__UpperCamelCase = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__UpperCamelCase = int(size["""height"""] / crop_pct )
else:
__UpperCamelCase = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase ) )
__UpperCamelCase = get_resize_output_image_size(lowercase , size=lowercase , default_to_square=lowercase )
else:
if "shortest_edge" in size:
__UpperCamelCase = get_resize_output_image_size(lowercase , size=size["""shortest_edge"""] , default_to_square=lowercase )
elif "height" in size and "width" in size:
__UpperCamelCase = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase ) )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
__UpperCamelCase = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(lowercase , size=(size["""height"""], size["""width"""]) , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> str:
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def __lowerCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image:
__UpperCamelCase = do_resize if do_resize is not None else self.do_resize
__UpperCamelCase = crop_pct if crop_pct is not None else self.crop_pct
__UpperCamelCase = resample if resample is not None else self.resample
__UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCamelCase = image_mean if image_mean is not None else self.image_mean
__UpperCamelCase = image_std if image_std is not None else self.image_std
__UpperCamelCase = size if size is not None else self.size
__UpperCamelCase = get_size_dict(lowercase , default_to_square=lowercase )
__UpperCamelCase = crop_size if crop_size is not None else self.crop_size
__UpperCamelCase = get_size_dict(lowercase , param_name="""crop_size""" )
__UpperCamelCase = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__UpperCamelCase = [to_numpy_array(lowercase ) for image in images]
if do_resize:
__UpperCamelCase = [self.resize(image=lowercase , size=lowercase , crop_pct=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
__UpperCamelCase = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
__UpperCamelCase = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
__UpperCamelCase = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
__UpperCamelCase = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
__UpperCamelCase = {"""pixel_values""": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
| 601 |
'''simple docstring'''
def _lowercase ( ):
'''simple docstring'''
__UpperCamelCase = []
__UpperCamelCase = 1
while len(__A ) < 1E6:
constant.append(str(__A ) )
i += 1
__UpperCamelCase = """""".join(__A )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9_999] )
* int(constant[99_999] )
* int(constant[999_999] )
)
if __name__ == "__main__":
print(solution())
| 601 | 1 |
import sys
from collections import defaultdict
class A :
'''simple docstring'''
def __init__( self : Dict ) -> Any:
"""simple docstring"""
A__ = []
def a_ ( self : int , __lowerCAmelCase : List[Any] ) -> int:
"""simple docstring"""
return self.node_position[vertex]
def a_ ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
A__ = pos
def a_ ( self : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
A__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
A__ = 2 * start + 1
else:
A__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
A__ , A__ = heap[smallest_child], positions[smallest_child]
A__ , A__ = (
heap[start],
positions[start],
)
A__ , A__ = temp, tempa
A__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __lowerCAmelCase )
self.top_to_bottom(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
def a_ ( self : Union[str, Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : int ) -> List[Any]:
"""simple docstring"""
A__ = position[index]
while index != 0:
A__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
A__ = heap[parent]
A__ = position[parent]
self.set_position(position[parent] , __lowerCAmelCase )
else:
A__ = val
A__ = temp
self.set_position(__lowerCAmelCase , __lowerCAmelCase )
break
A__ = parent
else:
A__ = val
A__ = temp
self.set_position(__lowerCAmelCase , 0 )
def a_ ( self : Any , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] ) -> str:
"""simple docstring"""
A__ = len(__lowerCAmelCase ) // 2 - 1
for i in range(__lowerCAmelCase , -1 , -1 ):
self.top_to_bottom(__lowerCAmelCase , __lowerCAmelCase , len(__lowerCAmelCase ) , __lowerCAmelCase )
def a_ ( self : Union[str, Any] , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
A__ = positions[0]
A__ = sys.maxsize
self.top_to_bottom(__lowerCAmelCase , 0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
return temp
def __lowerCamelCase ( __a :Any ) -> Dict:
"""simple docstring"""
A__ = Heap()
A__ = [0] * len(__a )
A__ = [-1] * len(__a ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
A__ = [] # Heap of Distance of vertices from their neighboring vertex
A__ = []
for vertex in range(len(__a ) ):
distance_tv.append(sys.maxsize )
positions.append(__a )
heap.node_position.append(__a )
A__ = []
A__ = 1
A__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
A__ = 0
A__ = distance
heap.heapify(__a , __a )
for _ in range(1 , len(__a ) ):
A__ = heap.delete_minimum(__a , __a )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
A__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__a )]
):
A__ = distance
heap.bottom_to_top(
__a , heap.get_position(__a ) , __a , __a )
A__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A : Any = int(input('''Enter number of edges: ''').strip())
A : Any = defaultdict(list)
for _ in range(edges_number):
A : int = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 714 |
from __future__ import annotations
A : Optional[int] = 8.988e9 # units = N * m^s * C^-2
def __lowerCamelCase ( __a :float , __a :float , __a :float , __a :float ) -> dict[str, float]:
"""simple docstring"""
A__ = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if distance < 0:
raise ValueError("""Distance cannot be negative""" )
if force == 0:
A__ = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
A__ = abs(__a ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
A__ = abs(__a ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
A__ = (COULOMBS_CONSTANT * charge_product / abs(__a )) ** 0.5
return {"distance": distance}
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 247 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
def __init__(self , *__a , **__a ) -> None:
"""simple docstring"""
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use OwlViTImageProcessor instead.' , __a , )
super().__init__(*__a , **__a )
| 146 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """poolformer"""
def __init__(self , __a=3 , __a=16 , __a=16 , __a=3 , __a=4.0 , __a=[2, 2, 6, 2] , __a=[64, 128, 320, 512] , __a=[7, 3, 3, 3] , __a=[4, 2, 2, 2] , __a=[2, 1, 1, 1] , __a=4 , __a=0.0 , __a="gelu" , __a=True , __a=1E-5 , __a=0.02 , **__a , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = num_channels
UpperCAmelCase__ = patch_size
UpperCAmelCase__ = stride
UpperCAmelCase__ = padding
UpperCAmelCase__ = pool_size
UpperCAmelCase__ = hidden_sizes
UpperCAmelCase__ = mlp_ratio
UpperCAmelCase__ = depths
UpperCAmelCase__ = patch_sizes
UpperCAmelCase__ = strides
UpperCAmelCase__ = num_encoder_blocks
UpperCAmelCase__ = drop_path_rate
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = use_layer_scale
UpperCAmelCase__ = layer_scale_init_value
UpperCAmelCase__ = initializer_range
super().__init__(**__a )
class lowercase ( _UpperCamelCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = version.parse("""1.11""" )
@property
def UpperCamelCase__ (self ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def UpperCamelCase__ (self ) -> float:
"""simple docstring"""
return 2E-3
| 146 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase : Optional[Any] = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : str = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Dict = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
from manim import *
class UpperCamelCase__ (a ):
'''simple docstring'''
def UpperCamelCase_ ( self ):
lowerCamelCase__ = Rectangle(height=0.5 ,width=0.5 )
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0 )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = VGroup(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""CPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(1 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""GPU""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
gpu.align_to(_lowerCAmelCase ,_lowerCAmelCase )
gpu.set_x(gpu.get_x() - 1 )
self.add(_lowerCAmelCase )
lowerCamelCase__ = [mem.copy() for i in range(6 )]
lowerCamelCase__ = VGroup(*_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0 )
lowerCamelCase__ = Text("""Model""" ,font_size=24 )
lowerCamelCase__ = Group(_lowerCAmelCase ,_lowerCAmelCase ).arrange(_lowerCAmelCase ,buff=0.5 ,aligned_edge=_lowerCAmelCase )
model.move_to([3, -1.0, 0] )
self.play(
Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,Create(_lowerCAmelCase ,run_time=1 ) ,)
lowerCamelCase__ = MarkupText(
F'''First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.''' ,font_size=24 ,)
lowerCamelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCamelCase__ = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' ,font_size=18 ,)
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCAmelCase ,run_time=2.5 ) ,Write(_lowerCAmelCase ) ,Write(_lowerCAmelCase ) )
self.add(_lowerCAmelCase )
lowerCamelCase__ = []
lowerCamelCase__ = []
lowerCamelCase__ = []
for i, rect in enumerate(_lowerCAmelCase ):
lowerCamelCase__ = Rectangle(height=0.46 ,width=0.46 ).set_stroke(width=0.0 ).set_fill(_lowerCAmelCase ,opacity=0.7 )
cpu_target.move_to(_lowerCAmelCase )
cpu_target.generate_target()
lowerCamelCase__ = 0.46 / 4
lowerCamelCase__ = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) ,buff=0.02 ,direction=_lowerCAmelCase )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target ,direction=_lowerCAmelCase ,buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target ,direction=_lowerCAmelCase ,buff=0.0 )
cpu_targs.append(_lowerCAmelCase )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(_lowerCAmelCase ) )
second_animations.append(MoveToTarget(_lowerCAmelCase ,run_time=1.5 ) )
self.play(*_lowerCAmelCase )
self.play(*_lowerCAmelCase )
self.wait()
| 9 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> np.array:
_UpperCAmelCase = f"""{sampling_rate}"""
_UpperCAmelCase = "1"
_UpperCAmelCase = "f32le"
_UpperCAmelCase = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(__snake_case , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_UpperCAmelCase = ffmpeg_process.communicate(__snake_case )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
_UpperCAmelCase = output_stream[0]
_UpperCAmelCase = np.frombuffer(__snake_case , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case = "f32le" , ) -> int:
_UpperCAmelCase = f"""{sampling_rate}"""
_UpperCAmelCase = "1"
if format_for_conversion == "s16le":
_UpperCAmelCase = 2
elif format_for_conversion == "f32le":
_UpperCAmelCase = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_UpperCAmelCase = platform.system()
if system == "Linux":
_UpperCAmelCase = "alsa"
_UpperCAmelCase = "default"
elif system == "Darwin":
_UpperCAmelCase = "avfoundation"
_UpperCAmelCase = ":0"
elif system == "Windows":
_UpperCAmelCase = "dshow"
_UpperCAmelCase = "default"
_UpperCAmelCase = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
_UpperCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_UpperCAmelCase = _ffmpeg_stream(__snake_case , __snake_case )
for item in iterator:
yield item
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case = None , __snake_case = None , __snake_case = "f32le" , ) -> List[str]:
if stream_chunk_s is not None:
_UpperCAmelCase = stream_chunk_s
else:
_UpperCAmelCase = chunk_length_s
_UpperCAmelCase = ffmpeg_microphone(__snake_case , __snake_case , format_for_conversion=__snake_case )
if format_for_conversion == "s16le":
_UpperCAmelCase = np.intaa
_UpperCAmelCase = 2
elif format_for_conversion == "f32le":
_UpperCAmelCase = np.floataa
_UpperCAmelCase = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_UpperCAmelCase = chunk_length_s / 6
_UpperCAmelCase = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__snake_case , (int, float) ):
_UpperCAmelCase = [stride_length_s, stride_length_s]
_UpperCAmelCase = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_UpperCAmelCase = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_UpperCAmelCase = datetime.datetime.now()
_UpperCAmelCase = datetime.timedelta(seconds=__snake_case )
for item in chunk_bytes_iter(__snake_case , __snake_case , stride=(stride_left, stride_right) , stream=__snake_case ):
# Put everything back in numpy scale
_UpperCAmelCase = np.frombuffer(item["""raw"""] , dtype=__snake_case )
_UpperCAmelCase = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
_UpperCAmelCase = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 1_0 * delta:
# We're late !! SKIP
continue
yield item
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case = False ) -> str:
_UpperCAmelCase = B""
_UpperCAmelCase = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_UpperCAmelCase = 0
for raw in iterator:
acc += raw
if stream and len(__snake_case ) < chunk_len:
_UpperCAmelCase = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__snake_case ) >= chunk_len:
# We are flushing the accumulator
_UpperCAmelCase = (_stride_left, stride_right)
_UpperCAmelCase = {"raw": acc[:chunk_len], "stride": stride}
if stream:
_UpperCAmelCase = False
yield item
_UpperCAmelCase = stride_left
_UpperCAmelCase = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__snake_case ) > stride_left:
_UpperCAmelCase = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
_UpperCAmelCase = False
yield item
def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case ) -> Tuple:
_UpperCAmelCase = 2**2_4 # 16Mo
try:
with subprocess.Popen(__snake_case , stdout=subprocess.PIPE , bufsize=__snake_case ) as ffmpeg_process:
while True:
_UpperCAmelCase = ffmpeg_process.stdout.read(__snake_case )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error | 108 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class _lowerCamelCase ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
"""simple docstring"""
def __init__( self , UpperCAmelCase=None , **UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
super().__init__(features=UpperCAmelCase )
__snake_case : Optional[int] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def UpperCAmelCase ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
import torch
if isinstance(UpperCAmelCase , UpperCAmelCase ) and column:
if all(
isinstance(UpperCAmelCase , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCAmelCase )
return column
def UpperCAmelCase ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
import torch
if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ):
return value
elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__snake_case : Optional[Any] = {}
if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__snake_case : List[Any] = {"dtype": torch.intaa}
elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__snake_case : Union[str, Any] = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCAmelCase , PIL.Image.Image ):
__snake_case : Optional[int] = np.asarray(UpperCAmelCase )
return torch.tensor(UpperCAmelCase , **{**default_dtype, **self.torch_tensor_kwargs} )
def UpperCAmelCase ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , torch.Tensor ):
__snake_case : List[str] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCAmelCase , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
elif isinstance(UpperCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
return self._tensorize(UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> Mapping:
'''simple docstring'''
__snake_case : Any = self.numpy_arrow_extractor().extract_row(UpperCAmelCase )
__snake_case : Dict = self.python_features_decoder.decode_row(UpperCAmelCase )
return self.recursive_tensorize(UpperCAmelCase )
def UpperCAmelCase ( self , UpperCAmelCase ) -> "torch.Tensor":
'''simple docstring'''
__snake_case : Optional[Any] = self.numpy_arrow_extractor().extract_column(UpperCAmelCase )
__snake_case : List[str] = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] )
__snake_case : Union[str, Any] = self.recursive_tensorize(UpperCAmelCase )
__snake_case : Union[str, Any] = self._consolidate(UpperCAmelCase )
return column
def UpperCAmelCase ( self , UpperCAmelCase ) -> Mapping:
'''simple docstring'''
__snake_case : str = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase )
__snake_case : List[str] = self.python_features_decoder.decode_batch(UpperCAmelCase )
__snake_case : Union[str, Any] = self.recursive_tensorize(UpperCAmelCase )
for column_name in batch:
__snake_case : Dict = self._consolidate(batch[column_name] )
return batch
| 243 | 0 |
def lowerCamelCase_ ( A : int ):
"""simple docstring"""
if num <= 0:
raise ValueError('''Input must be a positive integer''' )
lowerCAmelCase_ = [True] * (num + 1)
lowerCAmelCase_ = 2
while p * p <= num:
if primes[p]:
for i in range(p * p , num + 1 , A ):
lowerCAmelCase_ = False
p += 1
return [prime for prime in range(2 , num + 1 ) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 413 |
_snake_case = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 413 | 1 |
def A_ ( ) -> List[Any]:
a__ : Any = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
a__ : Optional[Any] = 6
a__ : Any = 1
a__ : Union[str, Any] = 1901
a__ : Dict = 0
while year < 2001:
day += 7
if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
a__ : List[str] = day - days_per_month[month - 2]
elif day > 29 and month == 2:
month += 1
a__ : int = day - 29
else:
if day > days_per_month[month - 1]:
month += 1
a__ : Optional[int] = day - days_per_month[month - 2]
if month > 12:
year += 1
a__ : Any = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
| 302 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class A__ :
"""simple docstring"""
__A : Optional[int] = None
__A : Optional[jnp.ndarray] = None
__A : Optional[jnp.ndarray] = None # sigma(t_i)
@classmethod
def __lowercase ( cls) -> Union[str, Any]:
'''simple docstring'''
return cls()
@dataclass
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : jnp.ndarray
__A : jnp.ndarray
__A : KarrasVeSchedulerState
class A__ ( __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
@property
def __lowercase ( self) -> str:
'''simple docstring'''
return True
@register_to_config
def __init__( self , lowercase = 0.02 , lowercase = 100 , lowercase = 1.0_07 , lowercase = 80 , lowercase = 0.05 , lowercase = 50 , ) -> List[Any]:
'''simple docstring'''
pass
def __lowercase ( self) -> str:
'''simple docstring'''
return KarrasVeSchedulerState.create()
def __lowercase ( self , lowercase , lowercase , lowercase = ()) -> KarrasVeSchedulerState:
'''simple docstring'''
a__ : Any = jnp.arange(0 , lowercase)[::-1].copy()
a__ : Any = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=lowercase , schedule=jnp.array(lowercase , dtype=jnp.floataa) , timesteps=lowercase , )
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , ) -> Tuple[jnp.ndarray, float]:
'''simple docstring'''
if self.config.s_min <= sigma <= self.config.s_max:
a__ : List[str] = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1)
else:
a__ : str = 0
# sample eps ~ N(0, S_noise^2 * I)
a__ : Optional[Any] = random.split(lowercase , num=1)
a__ : Optional[Any] = self.config.s_noise * random.normal(key=lowercase , shape=sample.shape)
a__ : str = sigma + gamma * sigma
a__ : int = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
a__ : Union[str, Any] = sample_hat + sigma_hat * model_output
a__ : Tuple = (sample_hat - pred_original_sample) / sigma_hat
a__ : Dict = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowercase , derivative=lowercase , state=lowercase)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
a__ : Optional[int] = sample_prev + sigma_prev * model_output
a__ : Union[str, Any] = (sample_prev - pred_original_sample) / sigma_prev
a__ : List[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowercase , derivative=lowercase , state=lowercase)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
raise NotImplementedError()
| 302 | 1 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCamelCase :Union[str, Any] = logging.get_logger(__name__)
class _lowerCAmelCase ( __a ):
__SCREAMING_SNAKE_CASE : Tuple = ['audio_values', 'audio_mask']
def __init__(self , lowercase=2048 , lowercase=1 , lowercase=[16, 16] , lowercase=128 , lowercase=44100 , lowercase=86 , lowercase=2048 , lowercase=0.0 , **lowercase , ):
super().__init__(
feature_size=a_ , sampling_rate=a_ , padding_value=a_ , **a_ , )
A_ : Optional[int] = spectrogram_length
A_ : Dict = num_channels
A_ : int = patch_size
A_ : Any = feature_size // self.patch_size[1]
A_ : List[Any] = n_fft
A_ : List[str] = sampling_rate // hop_length_to_sampling_rate
A_ : int = sampling_rate
A_ : Dict = padding_value
A_ : Optional[int] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a_ , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=a_ , norm="""slaney""" , mel_scale="""slaney""" , ).T
def _a (self , lowercase ):
A_ : List[Any] = spectrogram(
a_ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
A_ : Dict = log_spec[:, :-1]
A_ : Union[str, Any] = log_spec - 20.0
A_ : int = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__(self , lowercase , lowercase = None , lowercase = True , lowercase = None , lowercase = False , lowercase = False , **lowercase , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
F' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : str = isinstance(a_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
A_ : Optional[Any] = is_batched_numpy or (
isinstance(a_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A_ : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a_ , np.ndarray ):
A_ : int = np.asarray(a_ , dtype=np.floataa )
elif isinstance(a_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : List[str] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
A_ : Dict = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , a_ ):
A_ : str = [np.asarray(a_ , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
A_ : Any = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
A_ : Any = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
A_ : Optional[int] = np.array(a_ ).astype(np.floataa )
# convert into correct format for padding
A_ : Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
A_ : Optional[Any] = np.ones([len(a_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
A_ : Any = padded_audio_features * self.padding_value
for i in range(len(a_ ) ):
A_ : Optional[Any] = audio_features[i]
A_ : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
A_ : Dict = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
A_ : Optional[Any] = {"""audio_values""": padded_audio_features}
A_ : int = BatchFeature(data=a_ , tensor_type=a_ )
return encoded_inputs | 706 |
'''simple docstring'''
import os
import sys
import unittest
lowerCamelCase :Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''')
lowerCamelCase :Tuple = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''')
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Tuple = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = get_test_to_tester_mapping(lowercase )
A_ : Union[str, Any] = {"""BertModelTest""": """BertModelTester"""}
A_ : Union[str, Any] = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : Optional[Any] = get_model_to_test_mapping(lowercase )
A_ : List[str] = get_model_to_test_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
A_ : Any = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
def _a (self ):
A_ : List[Any] = get_model_to_tester_mapping(lowercase )
A_ : Optional[int] = get_model_to_tester_mapping(lowercase )
A_ : Dict = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
A_ : Dict = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase )
self.assertEqual(get_test_info.to_json(lowercase ) , lowercase ) | 686 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = '''ylacombe/bark-small'''
UpperCAmelCase__ : int = tempfile.mkdtemp()
UpperCAmelCase__ : Union[str, Any] = '''en_speaker_1'''
UpperCAmelCase__ : Union[str, Any] = '''This is a test string'''
UpperCAmelCase__ : Any = '''speaker_embeddings_path.json'''
UpperCAmelCase__ : Dict = '''speaker_embeddings'''
def lowercase_ ( self : str , **_A : Any ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCamelCase_ )
def lowercase_ ( self : int ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_tokenizer()
UpperCAmelCase__ : Optional[int] = BarkProcessor(tokenizer=UpperCamelCase_ )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase__ : List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
UpperCAmelCase__ : str = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCAmelCase__ : int = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
UpperCAmelCase__ : List[Any] = 35
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : List[Any] = 8
UpperCAmelCase__ : Union[str, Any] = {
'''semantic_prompt''': np.ones(UpperCamelCase_ ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
UpperCAmelCase__ : int = processor(text=self.input_string , voice_preset=UpperCamelCase_ )
UpperCAmelCase__ : int = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
UpperCAmelCase__ : List[str] = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(UpperCamelCase_ , **UpperCamelCase_ )
UpperCAmelCase__ : List[str] = processor(text=self.input_string , voice_preset=UpperCamelCase_ )
UpperCAmelCase__ : str = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
UpperCAmelCase__ : Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.get_tokenizer()
UpperCAmelCase__ : int = BarkProcessor(tokenizer=UpperCamelCase_ )
UpperCAmelCase__ : Optional[int] = processor(text=self.input_string )
UpperCAmelCase__ : List[str] = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 75 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ) ->List[str]:
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
snake_case__ = nn.Parameter(UpperCAmelCase_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
snake_case__ = nn.Parameter(UpperCAmelCase_ )
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->List[Any]:
# set torch weights for 1-to-1 comparison
snake_case__ = np.asarray(weights[0] )
snake_case__ = np.asarray(weights[1] )
snake_case__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(UpperCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCAmelCase_ ).view(-1 , UpperCAmelCase_ ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->List[Any]:
# set torch weights for 1-to-1 comparison
snake_case__ = np.asarray(weights[0] )
snake_case__ = np.asarray(weights[1] )
snake_case__ = np.asarray(weights[2] )
snake_case__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(UpperCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(UpperCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(UpperCAmelCase_ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCAmelCase_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(UpperCAmelCase_ ).view(-1 , UpperCAmelCase_ ).contiguous().transpose(0 , 1 ) , )
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Dict:
# layernorm 1
snake_case__ = weights[0][0][0]
snake_case__ = np.asarray(layer_norm_a[0] )
snake_case__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(UpperCAmelCase_ ) , torch.tensor(UpperCAmelCase_ ) , )
# lsh weights + output
snake_case__ = weights[0][1]
if len(UpperCAmelCase_ ) < 4:
set_layer_weights_in_torch_lsh(UpperCAmelCase_ , torch_block.attention , UpperCAmelCase_ )
else:
set_layer_weights_in_torch_local(UpperCAmelCase_ , torch_block.attention , UpperCAmelCase_ )
# intermediate weighs
snake_case__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCAmelCase_ ) == 4:
snake_case__ = intermediate_weights[2]
# layernorm 2
snake_case__ = np.asarray(intermediate_weights[0][0] )
snake_case__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(UpperCAmelCase_ ) , torch.tensor(UpperCAmelCase_ ) , )
# intermediate dense
snake_case__ = np.asarray(intermediate_weights[1][0] )
snake_case__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(UpperCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase_ ) , )
# intermediate out
snake_case__ = np.asarray(intermediate_weights[4][0] )
snake_case__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(UpperCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase_ ) , )
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->Union[str, Any]:
# reformer model
snake_case__ = torch_model.reformer
# word embeds
snake_case__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCAmelCase_ ) , )
if isinstance(weights[3] , UpperCAmelCase_ ):
snake_case__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
snake_case__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
snake_case__ = nn.Parameter(torch.tensor(UpperCAmelCase_ ) )
snake_case__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCAmelCase_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
snake_case__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# output layer norm
snake_case__ = np.asarray(weights[7][0] )
snake_case__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCAmelCase_ ) , torch.tensor(UpperCAmelCase_ ) , )
# output embeddings
snake_case__ = np.asarray(weights[9][0] )
snake_case__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(UpperCAmelCase_ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCAmelCase_ ) , )
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->List[Any]:
# Initialise PyTorch model
snake_case__ = ReformerConfig.from_json_file(UpperCAmelCase_ )
print(f'''Building PyTorch model from configuration: {config}''' )
snake_case__ = ReformerModelWithLMHead(UpperCAmelCase_ )
with open(UpperCAmelCase_ , 'rb' ) as f:
snake_case__ = pickle.load(UpperCAmelCase_ )['weights']
set_model_weights_in_torch(UpperCAmelCase_ , UpperCAmelCase_ , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , UpperCAmelCase_ )
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a__ : Dict = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 368 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase : List[str] = logging.get_logger(__name__)
lowercase : List[Any] = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class A__ ( __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
__A : str = '''convnextv2'''
def __init__( self , lowercase=3 , lowercase=4 , lowercase=4 , lowercase=None , lowercase=None , lowercase="gelu" , lowercase=0.02 , lowercase=1e-12 , lowercase=0.0 , lowercase=224 , lowercase=None , lowercase=None , **lowercase , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowercase)
a__ : Tuple = num_channels
a__ : Optional[int] = patch_size
a__ : List[Any] = num_stages
a__ : Optional[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
a__ : Tuple = [3, 3, 9, 3] if depths is None else depths
a__ : Union[str, Any] = hidden_act
a__ : List[Any] = initializer_range
a__ : int = layer_norm_eps
a__ : List[Any] = drop_path_rate
a__ : Tuple = image_size
a__ : int = ['stem'] + [F'stage{idx}' for idx in range(1 , len(self.depths) + 1)]
a__ : Dict = get_aligned_output_features_output_indices(
out_features=lowercase , out_indices=lowercase , stage_names=self.stage_names)
| 717 |
from collections import namedtuple
lowercase : List[str] = namedtuple("""from_to""", """from_ to""")
lowercase : Tuple = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1_0_0_0),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00_454, 264.172),
"""cubicyard""": from_to(0.76_455, 1.30_795),
"""cubicfoot""": from_to(0.028, 35.3_147),
"""cup""": from_to(0.000_236_588, 4_226.75),
}
def A_ ( A__ , A__ , A__ ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
+ ', '.join(A__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
+ ', '.join(A__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 392 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
SCREAMING_SNAKE_CASE_:Optional[Any] = """0.12""" # assumed parallelism: 8
@require_flax
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _lowerCAmelCase ( cls ):
A : Any = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def _lowerCAmelCase ( cls ):
try:
delete_repo(token=cls._token, repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
def _lowerCAmelCase ( self ):
A : Any = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
A : List[Any] = FlaxBertModel(lowerCamelCase__ )
model.push_to_hub("""test-model-flax""", use_auth_token=self._token )
A : List[str] = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
A : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
A : List[Any] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : int = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id="""test-model-flax""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(lowerCamelCase__, repo_id="""test-model-flax""", push_to_hub=lowerCamelCase__, use_auth_token=self._token )
A : int = FlaxBertModel.from_pretrained(f'''{USER}/test-model-flax''' )
A : Union[str, Any] = flatten_dict(unfreeze(model.params ) )
A : Tuple = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : Dict = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__, 1e-3, msg=f'''{key} not identical''' )
def _lowerCAmelCase ( self ):
A : Optional[Any] = BertConfig(
vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 )
A : Dict = FlaxBertModel(lowerCamelCase__ )
model.push_to_hub("""valid_org/test-model-flax-org""", use_auth_token=self._token )
A : int = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A : List[Any] = flatten_dict(unfreeze(model.params ) )
A : List[str] = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : Tuple = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__, 1e-3, msg=f'''{key} not identical''' )
# Reset repo
delete_repo(token=self._token, repo_id="""valid_org/test-model-flax-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
lowerCamelCase__, repo_id="""valid_org/test-model-flax-org""", push_to_hub=lowerCamelCase__, use_auth_token=self._token )
A : Tuple = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
A : str = flatten_dict(unfreeze(model.params ) )
A : int = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
A : Union[str, Any] = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(lowerCamelCase__, 1e-3, msg=f'''{key} not identical''' )
def __UpperCamelCase ( _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
A : Optional[int] = True
A : Optional[Any] = flatten_dict(modela.params )
A : Union[str, Any] = flatten_dict(modela.params )
for key in flat_params_a.keys():
if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1e-4:
A : str = False
return models_are_equal
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : Any = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A : int = FlaxBertModel(lowerCamelCase__ )
A : str = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase__, lowerCamelCase__ ) )
with self.assertRaises(lowerCamelCase__ ):
A : str = FlaxBertModel.from_pretrained(lowerCamelCase__ )
A : Optional[Any] = FlaxBertModel.from_pretrained(lowerCamelCase__, subfolder=lowerCamelCase__ )
self.assertTrue(check_models_equal(lowerCamelCase__, lowerCamelCase__ ) )
def _lowerCAmelCase ( self ):
A : Optional[int] = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
A : Dict = FlaxBertModel(lowerCamelCase__ )
A : Tuple = 'bert'
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(lowerCamelCase__, lowerCamelCase__ ), max_shard_size="""10KB""" )
with self.assertRaises(lowerCamelCase__ ):
A : int = FlaxBertModel.from_pretrained(lowerCamelCase__ )
A : str = FlaxBertModel.from_pretrained(lowerCamelCase__, subfolder=lowerCamelCase__ )
self.assertTrue(check_models_equal(lowerCamelCase__, lowerCamelCase__ ) )
def _lowerCAmelCase ( self ):
A : Union[str, Any] = 'bert'
A : List[str] = 'hf-internal-testing/tiny-random-bert-subfolder'
with self.assertRaises(lowerCamelCase__ ):
A : Union[str, Any] = FlaxBertModel.from_pretrained(lowerCamelCase__ )
A : Tuple = FlaxBertModel.from_pretrained(lowerCamelCase__, subfolder=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _lowerCAmelCase ( self ):
A : Optional[Any] = 'bert'
A : Optional[int] = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
with self.assertRaises(lowerCamelCase__ ):
A : Tuple = FlaxBertModel.from_pretrained(lowerCamelCase__ )
A : Dict = FlaxBertModel.from_pretrained(lowerCamelCase__, subfolder=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
| 662 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowercase :
'''simple docstring'''
def __init__( self : List[Any] , snake_case : Optional[Any] , snake_case : int=13 , snake_case : int=7 , snake_case : Dict=True , snake_case : Union[str, Any]=True , snake_case : Union[str, Any]=True , snake_case : List[Any]=True , snake_case : Tuple=99 , snake_case : Any=[1, 1, 2] , snake_case : Dict=1 , snake_case : Optional[int]=32 , snake_case : Union[str, Any]=4 , snake_case : Optional[Any]=8 , snake_case : Dict=37 , snake_case : int="gelu_new" , snake_case : Optional[Any]=0.1 , snake_case : List[str]=0.1 , snake_case : Any=0.0 , snake_case : Dict=512 , snake_case : List[str]=3 , snake_case : Any=0.02 , snake_case : List[str]=3 , snake_case : Optional[Any]=4 , snake_case : Dict=None , snake_case : Any=False , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = parent
SCREAMING_SNAKE_CASE : Optional[Any] = batch_size
SCREAMING_SNAKE_CASE : List[str] = seq_length
SCREAMING_SNAKE_CASE : int = is_training
SCREAMING_SNAKE_CASE : Any = use_input_mask
SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
SCREAMING_SNAKE_CASE : Dict = use_labels
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : Dict = block_sizes
SCREAMING_SNAKE_CASE : Dict = num_decoder_layers
SCREAMING_SNAKE_CASE : int = d_model
SCREAMING_SNAKE_CASE : Union[str, Any] = n_head
SCREAMING_SNAKE_CASE : Optional[int] = d_head
SCREAMING_SNAKE_CASE : int = d_inner
SCREAMING_SNAKE_CASE : List[Any] = hidden_act
SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout
SCREAMING_SNAKE_CASE : Any = attention_dropout
SCREAMING_SNAKE_CASE : Optional[Any] = activation_dropout
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : int = type_vocab_size
SCREAMING_SNAKE_CASE : str = 2
SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE : Tuple = num_choices
SCREAMING_SNAKE_CASE : str = scope
SCREAMING_SNAKE_CASE : int = initializer_std
# Used in the tests to check the size of the first attention layer
SCREAMING_SNAKE_CASE : int = n_head
# Used in the tests to check the size of the first hidden state
SCREAMING_SNAKE_CASE : List[str] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
SCREAMING_SNAKE_CASE : List[str] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
SCREAMING_SNAKE_CASE : Dict = self.num_hidden_layers + 2
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Dict = None
SCREAMING_SNAKE_CASE : int = None
if self.use_labels:
SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE : Dict = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCamelCase_ ( self : Dict , snake_case : List[str] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = TFFunnelModel(config=snake_case )
SCREAMING_SNAKE_CASE : Dict = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Optional[Any] = model(snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : List[Any] = model(snake_case )
SCREAMING_SNAKE_CASE : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Any = TFFunnelModel(config=snake_case )
SCREAMING_SNAKE_CASE : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : int = TFFunnelModel(config=snake_case )
SCREAMING_SNAKE_CASE : List[Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase_ ( self : str , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Dict , snake_case : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = TFFunnelBaseModel(config=snake_case )
SCREAMING_SNAKE_CASE : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case )
SCREAMING_SNAKE_CASE : Optional[Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
SCREAMING_SNAKE_CASE : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : int = TFFunnelBaseModel(config=snake_case )
SCREAMING_SNAKE_CASE : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelBaseModel(config=snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase_ ( self : Optional[int] , snake_case : Any , snake_case : str , snake_case : Optional[int] , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFFunnelForPreTraining(config=snake_case )
SCREAMING_SNAKE_CASE : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : List[str] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : List[str] , snake_case : Any , snake_case : int , snake_case : List[str] , snake_case : Tuple , snake_case : int , snake_case : str , snake_case : List[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TFFunnelForMaskedLM(config=snake_case )
SCREAMING_SNAKE_CASE : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Any , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Dict , snake_case : Optional[Any] , snake_case : Dict , snake_case : Tuple , snake_case : str , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.num_labels
SCREAMING_SNAKE_CASE : Any = TFFunnelForSequenceClassification(config=snake_case )
SCREAMING_SNAKE_CASE : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Optional[Any] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : int , snake_case : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelForMultipleChoice(config=snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Tuple = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Tuple = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : List[str] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : Any , snake_case : Optional[Any] , snake_case : Dict , snake_case : Any , snake_case : int , snake_case : Union[str, Any] , snake_case : Any , snake_case : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = TFFunnelForTokenClassification(config=snake_case )
SCREAMING_SNAKE_CASE : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : str , snake_case : Optional[Any] , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TFFunnelForQuestionAnswering(config=snake_case )
SCREAMING_SNAKE_CASE : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Dict = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) , (
SCREAMING_SNAKE_CASE
) ,
) : Any = config_and_inputs
SCREAMING_SNAKE_CASE : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase : Optional[Any] = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase : Any = False
UpperCAmelCase : Tuple = False
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = TFFunnelModelTester(self )
SCREAMING_SNAKE_CASE : Any = ConfigTester(self , config_class=snake_case )
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
@require_tf
class lowercase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase):
'''simple docstring'''
UpperCAmelCase : Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCAmelCase : List[str] = False
UpperCAmelCase : Tuple = False
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelModelTester(self , base=snake_case )
SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=snake_case )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*snake_case )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCamelCase_ ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case ) | 352 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase__ = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
from .tokenization_mam_aaa import MaMaaaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mam_aaa import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
MaMaaaForConditionalGeneration,
MaMaaaModel,
MaMaaaPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 275 | """simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a ( unittest.TestCase ):
def __init__( self : int , __lowerCAmelCase : List[Any] , __lowerCAmelCase : str=7 , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : List[Any]=18 , __lowerCAmelCase : int=30 , __lowerCAmelCase : Any=400 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[Any]=True , ):
_UpperCAmelCase = size if size is not None else {"""height""": 18, """width""": 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = apply_ocr
def lowerCAmelCase_ ( self : int ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a ( lowerCAmelCase_ , unittest.TestCase ):
_snake_case : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = LayoutLMvaImageProcessingTester(self )
@property
def lowerCAmelCase_ ( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase_ ( self : Union[str, Any] ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """size""" ) )
self.assertTrue(hasattr(__lowerCAmelCase , """apply_ocr""" ) )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def lowerCAmelCase_ ( self : List[str] ):
pass
def lowerCAmelCase_ ( self : List[str] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , __lowerCAmelCase )
self.assertIsInstance(encoding.boxes , __lowerCAmelCase )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase_ ( self : Any ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCAmelCase , torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def lowerCAmelCase_ ( self : Optional[int] ):
# with apply_OCR = True
_UpperCAmelCase = LayoutLMvaImageProcessor()
from datasets import load_dataset
_UpperCAmelCase = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
_UpperCAmelCase = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
_UpperCAmelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowerCAmelCase )
self.assertListEqual(encoding.boxes , __lowerCAmelCase )
# with apply_OCR = False
_UpperCAmelCase = LayoutLMvaImageProcessor(apply_ocr=__lowerCAmelCase )
_UpperCAmelCase = image_processing(__lowerCAmelCase , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 275 | 1 |
def A__ ( SCREAMING_SNAKE_CASE_ : list ) -> int:
"""simple docstring"""
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
_UpperCAmelCase = grid[0]
for row_n in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
_UpperCAmelCase = grid[row_n]
_UpperCAmelCase = fill_row(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = grid[row_n]
return grid[-1][-1]
def A__ ( SCREAMING_SNAKE_CASE_ : list , SCREAMING_SNAKE_CASE_ : list ) -> list:
"""simple docstring"""
current_row[0] += row_above[0]
for cell_n in range(1 , len(SCREAMING_SNAKE_CASE_ ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod() | 32 |
from __future__ import annotations
from typing import Any
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
"""simple docstring"""
if not postfix_notation:
return 0
_SCREAMING_SNAKE_CASE = {'+', '-', '*', '/'}
_SCREAMING_SNAKE_CASE = []
for token in postfix_notation:
if token in operations:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = stack.pop(), stack.pop()
if token == "+":
stack.append(a + b )
elif token == "-":
stack.append(a - b )
elif token == "*":
stack.append(a * b )
else:
if a * b < 0 and a % b != 0:
stack.append(a // b + 1 )
else:
stack.append(a // b )
else:
stack.append(int(UpperCAmelCase__ ) )
return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 605 | 0 |
Dataset Card for "python_codestyles-mixed1-1k"
This dataset contains negative and positive examples with python code of compliance with a code style. A positive
example represents compliance with the code style (label is 1). Each example is composed of two components, the first
component consists of a code that either conforms to the code style or violates it and the second component
corresponding to an example code that already conforms to a code style.
The dataset combines both
datasets infinityofspace/python_codestyles-random-1k
and infinityofspace/python_codestyles-single-1k
by randomly selecting half of the examples from each of the two datasets.
The code styles in the combined dataset differ in at least one and exactly one codestyle rule, which is called a
mixed
codestyle dataset variant. The dataset consists of a training and test group, with none of the code styles
overlapping between groups. In addition, both groups contain completely different underlying codes.
The examples contain source code from the following repositories:
repository | tag or commit |
---|---|
TheAlgorithms/Python | f614ed72170011d2d439f7901e1c8daa7deac8c4 |
huggingface/transformers | v4.31.0 |
huggingface/datasets | 2.13.1 |
huggingface/diffusers | v0.18.2 |
huggingface/accelerate | v0.21.0 |
- Downloads last month
- 49