| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| string (lengths 82–53.2k) | int64 (0–721) | string (lengths 91–41.9k) | int64 (0–699) | int64 (0–1) |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class DownloadConfig:
    cache_dir: Optional[Union[str, Path]] = None
    force_download: bool = False
    resume_download: bool = False
    local_files_only: bool = False
    proxies: Optional[Dict] = None
    user_agent: Optional[str] = None
    extract_compressed_file: bool = False
    force_extract: bool = False
    delete_extracted: bool = False
    use_etag: bool = True
    num_proc: Optional[int] = None
    max_retries: int = 1
    use_auth_token: Optional[Union[str, bool]] = None
    ignore_url_params: bool = False
    storage_options: Optional[Dict] = None
    download_desc: Optional[str] = None
    def copy(self) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
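    # Usage note: copy() rebuilds the config through the constructor with deep-copied
    # field values, so mutating dict-valued fields on the copy never touches the original.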
| 221 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020
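# 250004 and 250020 are the fairseq ids of the English ("en_XX") and Romanian
# ("ro_RO") language codes in the mBART-50 vocabulary.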
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self) -> None:
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self) -> None:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)
    def test_vocab_size(self) -> None:
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)
    def test_full_tokenizer(self) -> None:
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."])
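        # "9" and "é" are not in the small test vocab, so the id -> token round-trip
        # above recovers "<unk>" in their place.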
@slow
def _UpperCAmelCase ( self: List[str] ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = {"input_ids": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="facebook/mbart-large-50" , revision="d3913889c59cd5c9e456b269c376325eabad57e2" , )
    def test_save_pretrained(self) -> None:
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
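    # Roughly: legacy_format=True writes only the slow (sentencepiece) files,
    # legacy_format=False writes only the unified tokenizer.json, and the
    # default (None) writes both.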
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]
    @classmethod
    def setUpClass(cls) -> None:
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038)
    def test_tokenizer_batch_encode_plus(self) -> None:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_truncation(self) -> None:
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self) -> None:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_053, 250_001])
    def test_special_tokens_unaffected_by_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self) -> None:
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
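        # shift_tokens_right rotates the final eos (id 2) to position 0, which is why
        # decoder_input_ids begin with [2, RO_CODE] while the labels end with eos.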
    @require_torch
    def test_tokenizer_prepare_batch(self) -> None:
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self) -> None:
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            },
        )
| 221 | 1 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
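# Note: `blackify` wraps indented snippets in a dummy `class Bla:` so that black can
# parse them as a complete module, then strips the wrapper off the formatted result.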
def is_copy_consistent(filename, overwrite=False):
    """Check that code marked as `Copied from` in `filename` matches the original; return the diffs."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 511 |
def harmonic_series(n_term: str) -> list:
    """Return the first `n_term` terms of the harmonic series as strings ("1", "1/2", ...)."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
print(harmonic_series(nth_term))
| 511 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, crop_pct=0.9, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, rescale_factor=1 / 255, do_rescale=True, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, crop_pct=None, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, crop_pct=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 601 |
'''simple docstring'''
def solution() -> int:
    """Project Euler 40: product of the digits of Champernowne's constant at positions 1, 10, ..., 10**6."""
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9_999])
        * int(constant[99_999])
        * int(constant[999_999])
    )


if __name__ == "__main__":
    print(solution())
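# Note: the loop concatenates the decimal expansions of 1, 2, 3, ... until at least
# one million digits are available; the final string is slightly longer than 10**6
# characters, which is fine since only positions up to index 999_999 are used.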
| 601 | 1 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self) -> None:
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos
    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)
    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)
    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """Compute a minimum spanning tree of the graph with Prim's algorithm."""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 714 |
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve for the one zero-valued quantity in Coulomb's law, F = k * q1 * q2 / d**2."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
    import doctest

    doctest.testmod()
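# Usage sketch: solve for the force between two 1 C charges held 2 m apart by
# passing force=0 (8.988e9 * 1 / 2**2):
#   coulombs_law(force=0, charge1=1, charge2=1, distance=2)  # {'force': 2247000000.0}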
| 247 | 0 |
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)
| 146 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"
    def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5, initializer_range=0.02, **kwargs) -> None:
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 146 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 9 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1))
        step_a = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24)
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18)
        key_text.move_to([-5, 2.4, 0])
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a, run_time=2.5), Write(key), Write(key_text))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(cpu_left_col_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 9 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Decode an audio payload to a mono float32 array via an ffmpeg subprocess."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
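# Usage sketch (hypothetical file path): decode any ffmpeg-readable payload to a
# mono float32 numpy array at the requested rate:
#   audio = ffmpeg_read(open("sample.flac", "rb").read(), sampling_rate=16000)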
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = "f32le"):
    """Read raw audio chunks from the default microphone via an ffmpeg subprocess."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s=None, stride_length_s=None, format_for_conversion: str = "f32le"):
    """Stream microphone audio as overlapping numpy chunks with stride metadata."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into fixed-size chunks that overlap by `stride`."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
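# Sketch of the overlap logic: with chunk_len=6 and stride=(2, 2), each yielded
# chunk advances the accumulator by chunk_len - stride_left - stride_right = 2
# bytes, so consecutive chunks overlap by 4 bytes.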
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal: yield successive reads of `buflen` bytes from an ffmpeg subprocess."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 108 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})
    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 243 | 0 |
def prime_sieve_eratosthenes(num: int):
    """Sieve of Eratosthenes: return all primes up to and including `num`."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
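# Example: prime_sieve_eratosthenes(10) returns [2, 3, 5, 7]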
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 413 |
__all__ = [
"DownloadConfig",
"DownloadManager",
"DownloadMode",
"StreamingDownloadManager",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 413 | 1 |
def solution() -> int:
    """Count the Sundays that fell on the first of the month during 1901-2000."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
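# Project Euler problem 19: count the Sundays that fell on the first of the month
# during the twentieth century (1 Jan 1901 to 31 Dec 2000).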
if __name__ == "__main__":
    print(solution())
| 302 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()
@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState
class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()
    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> KarrasVeSchedulerState:
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps, schedule=jnp.array(schedule, dtype=jnp.float32), timesteps=timesteps)
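        # The schedule decays geometrically from sigma_max**2 down to sigma_min**2
        # across the reversed timesteps (the Karras et al. noise schedule).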
    def add_noise_to_input(self, state, sample, sigma, key) -> Tuple[jnp.ndarray, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True) -> Union[FlaxKarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
'''simple docstring'''
a__ : Optional[int] = sample_prev + sigma_prev * model_output
a__ : Union[str, Any] = (sample_prev - pred_original_sample) / sigma_prev
a__ : List[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=lowercase , derivative=lowercase , state=lowercase)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
raise NotImplementedError()
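# A minimal sampling-loop sketch (my addition, not part of the original module;
# `denoiser` and `sample` are hypothetical: a model returning the model output
# for a noised sample, and the running latent array):
#
#     scheduler = FlaxKarrasVeScheduler()
#     state = scheduler.set_timesteps(scheduler.create_state(), num_inference_steps=50)
#     key = random.PRNGKey(0)
#     for i in range(state.num_inference_steps):
#         sigma = state.schedule[i]
#         sigma_prev = state.schedule[i + 1] if i + 1 < state.num_inference_steps else 0.0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(state, sample, sigma, key)
#         output = scheduler.step(state, denoiser(sample_hat, sigma_hat), sigma_hat, sigma_prev, sample_hat)
#         sample = output.prev_sample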
| 302 | 1 |
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128,
                 sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0,
            max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft,
            hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T,
            log_mel="dB", db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True,
                 sampling_rate=None, resample=False, mask_audio=False, **kwargs):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
return encoded_inputs | 706 |
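# A minimal usage sketch (my addition, not part of the original module):
#
#     import numpy as np
#     extractor = TvltFeatureExtractor()
#     audio = np.random.randn(44100).astype(np.float32)  # one second of mono audio at 44.1 kHz
#     features = extractor(audio, sampling_rate=44100, return_tensors="np")
#     features["audio_values"].shape  # roughly (1, 1, padded_time, 128)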
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}
        EXPECTED_BLIP_MAPPING = {
"""BlipModelTest""": """BlipModelTester""",
"""BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
"""BlipTextModelTest""": """BlipTextModelTester""",
"""BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
"""BlipVQAModelTest""": """BlipVQAModelTester""",
"""BlipVisionModelTest""": """BlipVisionModelTester""",
}
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
"""BertForMaskedLM""": ["""BertModelTest"""],
"""BertForMultipleChoice""": ["""BertModelTest"""],
"""BertForNextSentencePrediction""": ["""BertModelTest"""],
"""BertForPreTraining""": ["""BertModelTest"""],
"""BertForQuestionAnswering""": ["""BertModelTest"""],
"""BertForSequenceClassification""": ["""BertModelTest"""],
"""BertForTokenClassification""": ["""BertModelTest"""],
"""BertLMHeadModel""": ["""BertModelTest"""],
"""BertModel""": ["""BertModelTest"""],
}
        EXPECTED_BLIP_MAPPING = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
"""BlipModel""": ["""BlipModelTest"""],
"""BlipTextModel""": ["""BlipTextModelTest"""],
"""BlipVisionModel""": ["""BlipVisionModelTest"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
"""BertForMaskedLM""": ["""BertModelTester"""],
"""BertForMultipleChoice""": ["""BertModelTester"""],
"""BertForNextSentencePrediction""": ["""BertModelTester"""],
"""BertForPreTraining""": ["""BertModelTester"""],
"""BertForQuestionAnswering""": ["""BertModelTester"""],
"""BertForSequenceClassification""": ["""BertModelTester"""],
"""BertForTokenClassification""": ["""BertModelTester"""],
"""BertLMHeadModel""": ["""BertModelTester"""],
"""BertModel""": ["""BertModelTester"""],
}
        EXPECTED_BLIP_MAPPING = {
"""BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
"""BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
"""BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
"""BlipModel""": ["""BlipModelTester"""],
"""BlipTextModel""": ["""BlipTextModelTester"""],
"""BlipVisionModel""": ["""BlipVisionModelTester"""],
}
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING) | 686 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE: the boolean tokenizer kwargs below are reconstructed from the Bark
        # processor's behaviour and may differ from the original test values.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 75 |
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
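# Example invocation (hypothetical file paths; the flags match the argparse definitions above):
#     python convert_reformer_trax_checkpoint_to_pytorch.py \
#         --trax_model_pkl_path model.pkl \
#         --config_file config.json \
#         --pytorch_dump_path pytorch_model.bin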
| 368 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None,
                 hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0,
                 image_size=224, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
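# A minimal usage sketch (my addition, not part of the original module):
# the defaults above give the small ConvNeXt V2 layout, and `out_features`
# selects backbone outputs by stage name.
#
#     config = ConvNextV2Config(out_features=["stage4"])
#     config.hidden_sizes  # [96, 192, 384, 768]
#     config.stage_names   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']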
| 717 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
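# Comment-only examples (my addition; values follow the conversion table above):
#     volume_conversion(4, "cubicmeter", "litre")      -> 4000.0
#     volume_conversion(1, "litre", "gallon")          -> 0.264172
#     volume_conversion(1, "kilolitre", "cubicmeter")  -> 1.0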
| 392 | 0 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")
def check_models_equal(model_1, model_2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal
@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
| 662 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
def lowerCamelCase_ ( self : Dict , snake_case : List[str] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = TFFunnelModel(config=snake_case )
SCREAMING_SNAKE_CASE : Dict = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Optional[Any] = model(snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : List[Any] = model(snake_case )
SCREAMING_SNAKE_CASE : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : Any = TFFunnelModel(config=snake_case )
SCREAMING_SNAKE_CASE : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
SCREAMING_SNAKE_CASE : Any = False
SCREAMING_SNAKE_CASE : int = TFFunnelModel(config=snake_case )
SCREAMING_SNAKE_CASE : List[Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def lowerCamelCase_ ( self : str , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Dict , snake_case : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = TFFunnelBaseModel(config=snake_case )
SCREAMING_SNAKE_CASE : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case )
SCREAMING_SNAKE_CASE : Optional[Any] = [input_ids, input_mask]
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
SCREAMING_SNAKE_CASE : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : int = TFFunnelBaseModel(config=snake_case )
SCREAMING_SNAKE_CASE : Any = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelBaseModel(config=snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def lowerCamelCase_ ( self : Optional[int] , snake_case : Any , snake_case : str , snake_case : Optional[int] , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = TFFunnelForPreTraining(config=snake_case )
SCREAMING_SNAKE_CASE : str = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : List[str] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : List[str] , snake_case : Any , snake_case : int , snake_case : List[str] , snake_case : Tuple , snake_case : int , snake_case : str , snake_case : List[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = TFFunnelForMaskedLM(config=snake_case )
SCREAMING_SNAKE_CASE : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Any , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Dict , snake_case : Optional[Any] , snake_case : Dict , snake_case : Tuple , snake_case : str , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = self.num_labels
SCREAMING_SNAKE_CASE : Any = TFFunnelForSequenceClassification(config=snake_case )
SCREAMING_SNAKE_CASE : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Optional[Any] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : int , snake_case : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = self.num_choices
SCREAMING_SNAKE_CASE : Optional[Any] = TFFunnelForMultipleChoice(config=snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Tuple = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Tuple = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
SCREAMING_SNAKE_CASE : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
SCREAMING_SNAKE_CASE : List[str] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self : Any , snake_case : Optional[Any] , snake_case : Dict , snake_case : Any , snake_case : int , snake_case : Union[str, Any] , snake_case : Any , snake_case : Optional[Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.num_labels
SCREAMING_SNAKE_CASE : List[Any] = TFFunnelForTokenClassification(config=snake_case )
SCREAMING_SNAKE_CASE : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Tuple = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : str , snake_case : Optional[Any] , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Dict , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = TFFunnelForQuestionAnswering(config=snake_case )
SCREAMING_SNAKE_CASE : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
SCREAMING_SNAKE_CASE : Dict = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) | 352 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 275 | """simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30,
                 max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_UpperCAmelCase = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
_UpperCAmelCase = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __lowerCAmelCase )
self.assertListEqual(encoding.boxes , __lowerCAmelCase )
# with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
| 275 | 1 |
def min_path_sum(grid: list) -> int:
    """Find the minimum path sum through the grid, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''')

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]
def fill_row(current_row: list, row_above: list) -> list:
    """Accumulate the minimum path sum into current_row using the row above."""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 32 |
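# A minimal usage sketch for the grid helpers above; the 3x3 grid and its
# expected minimum path sum of 7 are illustrative values, not from the source:
example_grid = [
    [1, 3, 1],
    [1, 5, 1],
    [4, 2, 1],
]
# The path 1 -> 3 -> 1 -> 1 -> 1 gives the minimum sum.
assert min_path_sum(example_grid) == 7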
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression with integer operands."""
    if not postfix_notation:
        return 0

    operations = {'+', '-', '*', '/'}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated towards zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 605 | 0 |
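# A short usage sketch for evaluate_postfix above; the token list encodes
# (2 + 4) * 5 and the expected value was checked by hand:
assert evaluate_postfix(["2", "4", "+", "5", "*"]) == 30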
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    '''Constructs a ViLT processor which wraps a BERT tokenizer and a ViLT image processor.'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
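# A minimal usage sketch for the processor above; "dandelin/vilt-b32-finetuned-vqa"
# is a public ViLT checkpoint, and the blank image is an illustrative stand-in
# (a download is required on first use):
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
inputs = processor(images=Image.new("RGB", (384, 384)), text="How many cats are there?", return_tensors="pt")
# inputs contains input_ids, token_type_ids, attention_mask, pixel_values and pixel_mask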
| 179 |
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs) -> dict:
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self) -> None:
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self) -> None:
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self) -> None:
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self) -> None:
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self) -> None:
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self) -> None:
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self) -> None:
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self) -> None:
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
    def test_batch_step_no_noise(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1E-2
        assert abs(result_mean.item() - 0.5005) < 1E-3
    def test_full_loop_no_noise(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1E-2
        assert abs(result_mean.item() - 0.3372) < 1E-3
    def test_full_loop_with_v_prediction(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1E-2
        assert abs(result_mean.item() - 0.2631) < 1E-3
    def test_custom_timesteps(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self) -> None:
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError, msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}', ):
            scheduler.set_timesteps(timesteps=timesteps)
| 179 | 1 |
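# A brief usage sketch for DDPMParallelScheduler outside the test harness; the
# zero tensor is an illustrative stand-in for a real UNet noise prediction:
import torch
from diffusers import DDPMParallelScheduler

scheduler = DDPMParallelScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for a model prediction
    sample = scheduler.step(noise_pred, t, sample).prev_sample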
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class CustomTokenizerFast(BertTokenizerFast):
    """simple docstring"""

    slow_tokenizer_class = CustomTokenizer
    pass
| 623 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read contents of a compressed file as a filesystem with one file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo, mode="rb", protocol=target_protocol, compression=self.compression, client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            }, **(target_options or {}), )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()
    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an .xz (LZMA) file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a .zst file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
    def __init__(self, fo: str, mode: str = "rb", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, block_size: int = DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs, )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
| 654 | 0 |
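# A small, self-contained usage sketch for GzipFileSystem above; the file name
# "data.txt.gz" is illustrative:
import gzip

with gzip.open("data.txt.gz", "wb") as f:  # create a tiny fixture file
    f.write(b"hello")

fs = GzipFileSystem(fo="data.txt.gz")
assert fs.cat("data.txt") == b"hello"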
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths: NestedDataStructureLike[PathLike], split: Optional[NamedSplit] = None, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, field: Optional[str] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs, )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs, )
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc, )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(self, dataset: Dataset, path_or_buf: Union[PathLike, BinaryIO], batch_size: Optional[int] = None, num_proc: Optional[int] = None, **to_json_kwargs):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''')

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = '''utf-8'''
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop('''path_or_buf''', None)
        orient = self.to_json_kwargs.pop('''orient''', '''records''')
        lines = self.to_json_kwargs.pop('''lines''', True if orient == '''records''' else False)
        index = self.to_json_kwargs.pop('''index''', False if orient in ['''split''', '''table'''] else True)
        compression = self.to_json_kwargs.pop('''compression''', None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''')

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, '''wb''', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    ''' was passed. Please provide a local path instead.''')
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices, )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith('''\n'''):
            json_str += "\n"
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='''ba''', disable=not logging.is_progress_bar_enabled(), desc='''Creating json from Arrow format''', ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='''ba''', disable=not logging.is_progress_bar_enabled(), desc='''Creating json from Arrow format''', ):
                    written += file_obj.write(json_str)
        return written
| 501 |
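# A short usage sketch for the JSON writer above via the public Dataset API;
# the column values and output path are illustrative:
from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
ds.to_json("out.jsonl")  # JSON Lines by default (orient="records", lines=True)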
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 501 | 1 |
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
lowerCAmelCase_ = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''')
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'''files''': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, '''rb''') as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'''files''': files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f'''Tried to load parquet data with columns \'{self.config.columns}\' with mismatching features \'{self.info.features}\'''')
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, '''rb''') as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f'''{file_idx}_{batch_idx}''', self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f'''Failed to read file \'{file}\' with error {type(e)}: {e}''')
                    raise
| 39 |
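# A small usage sketch for the Parquet builder above via the public API; the
# file name is illustrative:
import pyarrow as pa
import pyarrow.parquet as pq
from datasets import load_dataset

pq.write_table(pa.table({"x": [1, 2, 3]}), "data.parquet")  # tiny fixture
ds = load_dataset("parquet", data_files="data.parquet", split="train")
assert ds["x"] == [1, 2, 3]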
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    """simple docstring"""

    def setUp(self) -> None:
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

        image_processor_map = {
            'do_resize': True,
            'size': 20,
            'do_center_crop': True,
            'crop_size': 18,
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepares a list of small random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self) -> None:
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self) -> None:
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors='np')
        input_processor = processor(images=image_input, return_tensors='np')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ['input_ids', 'attention_mask', 'pixel_values'])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self) -> None:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 302 | 0 |
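# A brief end-to-end sketch for CLIPProcessor outside the test harness;
# "openai/clip-vit-base-patch32" is the standard public checkpoint and the
# blank image is an illustrative stand-in (a download is required):
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
inputs = processor(text=["a photo of a cat"], images=Image.new("RGB", (224, 224)), return_tensors="pt", padding=True)
# inputs contains input_ids, attention_mask and pixel_values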
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Returns the list of all rotations of the input string."""
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Returns the Burrows-Wheeler transform of s and the index of the original string among the sorted rotations."""
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverses the Burrows-Wheeler transform, recovering the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or passive'
            ' of cast to int.')
    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than' ' len(bwt_string).')

    ordered_rotations = [''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = """Provide a string that I will generate its BWT transform: """
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        F'''Burrows Wheeler transform for string \'{s}\' results '''
        F'''in \'{result["bwt_string"]}\''''
    )
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
    print(
        F'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
        F'''we get original string \'{original_string}\''''
    )
| 630 |
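# A quick round-trip example for the BWT helpers above; "banana" is an
# illustrative input, with the transform values verified by hand:
result = bwt_transform("banana")
assert result["bwt_string"] == "nnbaaa" and result["idx_original_string"] == 3
assert reverse_bwt("nnbaaa", 3) == "banana"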
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    """In-graph GPT-2 tokenizer built on keras_nlp's BytePairTokenizer."""

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *init_inputs, **kwargs):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 630 | 1 |
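# A minimal usage sketch for the in-graph tokenizer above; "gpt2" is the
# standard checkpoint name (a download is required on first use):
import tensorflow as tf

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tf_tokenizer(tf.constant(["hello world"]))
# outputs["input_ids"] holds the token ids as a TensorFlow tensor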
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Take an integer-valued decimal number and return its hexadecimal string."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ''''''
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = '''0x''' + hexadecimal
    if negative:
        hexadecimal = '''-''' + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 445 |
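# Quick sanity checks for decimal_to_hexadecimal above (values verified by hand):
assert decimal_to_hexadecimal(26) == "0x1a"
assert decimal_to_hexadecimal(-256) == "-0x100"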
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
a__ : Any = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True) ->Dict:
    print(f'''Converting {name}...''')

    with torch.no_grad():
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 1_92:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 2_56:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 3_84:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 2_24, 2_24))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f'''Pushed {checkpoint_name}''')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True) ->Any:
    filename = 'imagenet-1k-id2label.json'
    num_labels = 10_00
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        'levit-128S': 1_28,
        'levit-128': 1_28,
        'levit-192': 1_92,
        'levit-256': 2_56,
        'levit-384': 3_84,
    }

    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[1_92, 2_88, 3_84], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[2_56, 3_84, 5_12], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[3_84, 5_12, 7_68], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
a__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
a__ : Optional[Any] = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 368 | 0 |
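# An illustrative invocation of the conversion script above; the script file
# name is hypothetical, but the flags are the ones declared in its parser:
# python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#     --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub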
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line: str, indent: str) -> bool:
    return line.startswith(indent) or len(line) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""", line) is not None
def find_code_in_diffusers(object_name: str) -> str:
    """Find and return the source code of `object_name` inside the diffusers package."""
    parts = object_name.split(""".""")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"""{module}.py""")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""")

    with open(os.path.join(DIFFUSERS_PATH, f"""{module}.py"""), """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(r'<FILL\s+[^>]*>')
def get_indent(code: str) -> str:
    lines = code.split("""\n""")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"""^(\s*)\S""", lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("""class Bla:\n""") :] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False):
    """Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`."""
    with open(filename, """r""", encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"""^{indent}# End copy""", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("""\n""") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("""with""", """""").split(""",""")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""")
        with open(filename, """w""", encoding="""utf-8""", newline="""\n""") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, """**/*.py"""), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""")
if __name__ == "__main__":
A_ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
A_ : Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 703 |
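# An illustrative example of the comment convention the checker above parses;
# the object path and replacement target are hypothetical:
# # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock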
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ['BartphoTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64 | 0 |
def selection_sort(collection: list) -> list:
    """Pure implementation of the selection sort algorithm in Python."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
| 550 |
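# Quick check for selection_sort above (example list chosen for illustration):
assert selection_sort([5, 2, 9, 1]) == [1, 2, 5, 9]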
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/deit-base-distilled-patch16-224": (
"https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = 'deit'

    def __init__(self, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2, intermediate_size=3_0_7_2, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=2_2_4, patch_size=1_6, num_channels=3, qkv_bias=True, encoder_stride=1_6, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
| 550 | 1 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
__a = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""bert""", choices=["""bert"""])
parser.add_argument("""--model_name""", default="""bert-base-uncased""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_bert-base-uncased_0247911.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
__a = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = """bert"""
    else:
        raise ValueError("""args.model_type should be \"bert\".""")

    state_dict = model.state_dict()
    # Map teacher (BERT) parameters onto the DistilBERT student layout; the
    # student key names below follow the standard DistilBERT parameter naming.
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[F'''distilbert.embeddings.{w}.weight'''] = state_dict[F'''{prefix}.embeddings.{w}.weight''']
    for w in ["weight", "bias"]:
        compressed_sd[F'''distilbert.embeddings.LayerNorm.{w}'''] = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
        for w in ["weight", "bias"]:
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
            ]

            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
            ]

            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["""cls.predictions.decoder.weight"""]
    compressed_sd["vocab_projector.bias"] = state_dict["""cls.predictions.bias"""]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[F'''vocab_transform.{w}'''] = state_dict[F'''cls.predictions.transform.dense.{w}''']
            compressed_sd[F'''vocab_layer_norm.{w}'''] = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
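    # Example invocation (the checkpoint path is illustrative):
    #   python extract.py --model_type bert --model_name bert-base-uncased \
    #       --dump_checkpoint serialization_dir/distilled_bert.pth --vocab_transform
    # This keeps 6 of the 12 teacher layers ([0, 2, 4, 7, 9, 11]) as a warm
    # start for a student of half the depth.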
| 689 |
def combination_sum_iv(n, array, target):
    """Count the ordered combinations of elements of `array` (with repetition) that sum to `target`."""

    def count_of_possible_combinations(target):
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n, array, target):
    """Same count, memoized top-down over `dp_array`."""

    def count_of_possible_combinations_with_dp_array(target, dp_array):
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n, array, target):
    """Same count, bottom-up tabulation in O(n * target) time."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
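

# Hand-counted check: with array = [1, 2, 5] there are 9 ordered combinations
# summing to 5 (f(5) = f(4) + f(3) + f(0) = 5 + 3 + 1), so all three
# implementations must agree.
assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_dp_array(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9
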
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 689 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 292 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_euler')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type='np')

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler('sample_dpmpp_2m')

        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=15, output_type='np', use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 292 | 1 |
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
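

# Minimal sanity check (added example): a triangle 0-1 (w=1), 1-2 (w=2),
# 0-2 (w=4) has the minimum spanning tree edges (0, 1) and (1, 2).
_example_graph = defaultdict(list)
for _u, _v, _w in [(0, 1, 1), (1, 2, 2), (0, 2, 4)]:
    _example_graph[_u].append([_v, _w])
    _example_graph[_v].append([_u, _w])
assert prisms_algorithm(_example_graph) == [(0, 1), (1, 2)]
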
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 702 |
"""simple docstring"""
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """
    Count the minimum number of perfect squares that sum to `number`
    (Lagrange's four-square theorem bounds the answer by 4).
    """
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
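

# Hand-checked examples: 12 = 4 + 4 + 4 needs 3 squares, 13 = 4 + 9 needs 2.
assert minimum_squares_to_represent_a_number(12) == 3
assert minimum_squares_to_represent_a_number(13) == 2
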
if __name__ == "__main__":
import doctest
doctest.testmod()
| 500 | 0 |
import string
from math import log10
def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document`."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False):
    """Return round(log10(n / df), 3), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int):
    """Combine a term frequency with an inverse document frequency."""
    return round(tf * idf, 3)
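

# Worked example: with 10 documents of which 2 contain the term,
# idf = log10(10 / 2) = 0.699; a raw term frequency of 3 then gives
# tf-idf = 3 * 0.699 = 2.097.
assert inverse_document_frequency(2, 10) == 0.699
assert tf_idf(3, 0.699) == 2.097
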
| 464 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 464 | 1 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"""},
"""merges_file""": {"""ctrl""": """https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"""},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""ctrl""": 2_5_6,
}
CONTROL_CODES = {
"""Pregnancy""": 1_6_8_6_2_9,
"""Christianity""": 7_6_7_5,
"""Explain""": 1_0_6_4_2_3,
"""Fitness""": 6_3_4_4_0,
"""Saving""": 6_3_1_6_3,
"""Ask""": 2_7_1_7_1,
"""Ass""": 9_5_9_8_5,
"""Joke""": 1_6_3_5_0_9,
"""Questions""": 4_5_6_2_2,
"""Thoughts""": 4_9_6_0_5,
"""Retail""": 5_2_3_4_2,
"""Feminism""": 1_6_4_3_3_8,
"""Writing""": 1_1_9_9_2,
"""Atheism""": 1_9_2_2_6_3,
"""Netflix""": 4_8_6_1_6,
"""Computing""": 3_9_6_3_9,
"""Opinion""": 4_3_2_1_3,
"""Alone""": 4_4_9_6_7,
"""Funny""": 5_8_9_1_7,
"""Gaming""": 4_0_3_5_8,
"""Human""": 4_0_8_8,
"""India""": 1_3_3_1,
"""Joker""": 7_7_1_3_8,
"""Diet""": 3_6_2_0_6,
"""Legal""": 1_1_8_5_9,
"""Norman""": 4_9_3_9,
"""Tip""": 7_2_6_8_9,
"""Weight""": 5_2_3_4_3,
"""Movies""": 4_6_2_7_3,
"""Running""": 2_3_4_2_5,
"""Science""": 2_0_9_0,
"""Horror""": 3_7_7_9_3,
"""Confession""": 6_0_5_7_2,
"""Finance""": 1_2_2_5_0,
"""Politics""": 1_6_3_6_0,
"""Scary""": 1_9_1_9_8_5,
"""Support""": 1_2_6_5_4,
"""Technologies""": 3_2_5_1_6,
"""Teenage""": 6_6_1_6_0,
"""Event""": 3_2_7_6_9,
"""Learned""": 6_7_4_6_0,
"""Notion""": 1_8_2_7_7_0,
"""Wikipedia""": 3_7_5_8_3,
"""Books""": 6_6_6_5,
"""Extract""": 7_6_0_5_0,
"""Confessions""": 1_0_2_7_0_1,
"""Conspiracy""": 7_5_9_3_2,
"""Links""": 6_3_6_7_4,
"""Narcissus""": 1_5_0_4_2_5,
"""Relationship""": 5_4_7_6_6,
"""Relationships""": 1_3_4_7_9_6,
"""Reviews""": 4_1_6_7_1,
"""News""": 4_2_5_6,
"""Translation""": 2_6_8_2_0,
"""multilingual""": 1_2_8_4_0_6,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
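

# Quick illustration of get_pairs on a toy symbol tuple (added example):
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}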
class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair Encoding (BPE).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string."""
        split_tokens = []
        words = re.findall(r'\S+\n?', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 708 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed)
        return out
| 392 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" RoFormer tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state['_tokenizer'].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__['_tokenizer'].get_vocab()
        self.__dict__['_tokenizer'].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 13 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS:
    at every index the element is either skipped or appended to the
    current subsequence.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
| 452 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''microsoft/focalnet-tiny''': '''https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json''',
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, use_conv_embed=False, hidden_sizes=[192, 384, 768, 768], depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2], focal_windows=[3, 3, 3, 3], hidden_act="gelu", mlp_ratio=4.0, hidden_dropout_prob=0.0, drop_path_rate=0.1, use_layerscale=False, layerscale_value=1e-4, use_post_layernorm=False, use_post_layernorm_in_modulation=False, normalize_modulator=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
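

# Minimal usage sketch (defaults only; no pretrained weights are loaded):
#   config = FocalNetConfig(out_features=["stage1", "stage2"])
#   config.hidden_sizes   # -> [192, 384, 768, 768]
#   config.stage_names    # -> ["stem", "stage1", "stage2", "stage3", "stage4"]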
| 714 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
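

# Hypothetical usage sketch (the checkpoint name below is an assumption, not
# taken from this file):
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=pil_image, text="a photo of", return_tensors="pt")
#   caption_ids = model.generate(**inputs)  # `model` would be a BLIP generation model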
| 579 | 0 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip('Esm does not support embedding resizing')
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 459 |
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png', threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    'score': ANY(float),
                    'label': ANY(str),
                    'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                })

        import datasets

        dataset = datasets.load_dataset('hf-internal-testing/fixtures_image_utils', 'image', split='test')
        batch = [
            Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
            'http://images.cocodataset.org/val2017/000000039769.jpg',
            # RGBA
            dataset[0]['file'],
            # LA
            dataset[1]['file'],
            # L
            dataset[2]['file'],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        'score': ANY(float),
                        'label': ANY(str),
                        'box': {'xmin': ANY(int), 'ymin': ANY(int), 'xmax': ANY(int), 'ymax': ANY(int)},
                    })
@require_tf
@unittest.skip('Object detection not implemented in TF' )
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = 'hf-internal-testing/tiny-detr-mobilenetsv3'

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 159, 'ymin': 120, 'xmax': 480, 'ymax': 359}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = 'facebook/detr-resnet-50'

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self ):
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection' , model=model_id )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
],
] , )
@require_torch
@slow
    def test_threshold(self ):
        threshold = 0.9985
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection' , model=model_id )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=threshold )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self ):
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9993
        object_detector = pipeline('object-detection' , model=model_id , threshold=threshold )
        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 294, 'ymin': 254, 'xmax': 343, 'ymax': 264}},
] , )
| 459 | 1 |
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output( self , result ):
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaModel(config=config )
        model.to(torch_device )
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result )
    def create_and_check_deberta_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = DebertaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp( self ):
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_deberta_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase ):
    '''simple docstring'''
    @unittest.skip(reason="Model not available yet" )
    def test_inference_masked_lm( self ):
        pass
    @slow
    def test_inference_no_head( self ):
        model = DebertaModel.from_pretrained("microsoft/deberta-base" )
        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
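# Minimal usage sketch for the checkpoint exercised above (illustrative, not part of the
# original test file): pairing the model with its tokenizer instead of hand-built tensors.
if __name__ == "__main__":
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-base")
    model = DebertaModel.from_pretrained("microsoft/deberta-base")
    encoded = tokenizer("Hello world", return_tensors="pt")
    with torch.no_grad():
        hidden = model(**encoded)[0]  # (batch, seq_len, hidden_size)
    print(hidden.shape)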
| 387 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
SPIECE_UNDERLINE = '▁'
class T5Tokenizer(PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , sp_model_kwargs=None , legacy=True , **kwargs , ):
# Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F"<extra_id_{i}>" for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda token : bool("extra_id" in str(token ) ) , additional_special_tokens ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
F"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , legacy=legacy , **kwargs , )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @staticmethod
    def _eventually_correct_t5_max_length( pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
F" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
F" {pretrained_model_name_or_path} automatically truncating your input to"
F" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
F" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value." , __UpperCAmelCase , )
return max_model_length
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size() + self._extra_ids
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1=None , already_has_special_tokens=False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0 )) + [1]
        return ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]
    def get_sentinel_tokens( self ):
        return list(
            set(filter(lambda token : bool(re.search(r"<extra_id_\d+>" , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ):
        return [self._convert_token_to_id(token ) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present( self , token_ids ):
        if len(token_ids ) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added." )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos ) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0 )
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1 )
            return token_ids_0 + token_ids_1
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def tokenize( self , text , **kwargs ):
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE , " " )
        return super().tokenize(text , **kwargs )
    def _tokenize( self , text , **kwargs ):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE )
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text , out_type=str )
        if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(SPIECE_UNDERLINE ):
            tokens = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
        return tokens
    def _convert_token_to_id( self , token ):
        if token.startswith("<extra_id_" ):
            match = re.match(r"<extra_id_(\d+)>" , token )
            num = int(match.group(1 ) )
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        else:
            token = F"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        if not os.path.isdir(save_directory ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
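# Usage sketch (illustrative): the sentinel tokens defined above implement T5's
# span-corruption format. Assuming a local SentencePiece file `spiece.model`:
#
#     tok = T5Tokenizer("spiece.model")
#     tok("The <extra_id_0> walks in <extra_id_1> park")["input_ids"]
#     tok.get_sentinel_tokens()  # the 100 '<extra_id_N>' strings (order not guaranteed)
#
# Sentinels map to the top of the vocabulary (vocab_size - 1 - N), which is why
# _convert_token_to_id counts backwards from vocab_size.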
| 387 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=3 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return FalconConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=True , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1)
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )['hidden_states'][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3))
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': FalconModel,
            'text-classification': FalconForSequenceClassification,
            'text-generation': FalconForCausalLM,
            'question-answering': FalconForQuestionAnswering,
            'token-classification': FalconForTokenClassification,
            'zero-shot': FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp( self ):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self , config_class=FalconConfig , hidden_size=37)
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_position_embedding_types( self ):
        config , *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config , *inputs)
    def test_falcon_sequence_classification_model( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_sequence_classification_model_for_single_label( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'single_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_falcon_cache_conversion( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict['input_ids']
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache , batch_size)
        for layer in range(len(result.past_key_values)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))
    def test_falcon_sequence_classification_model_for_multi_label( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = 'multi_label_classification'
        input_ids = input_dict['input_ids']
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels)
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format( self ):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config , inputs = self.model_tester.prepare_config_and_inputs_for_common()
            # If it doesn't support cache, pass the test
            if not hasattr(config , 'use_cache'):
                return
            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs['use_cache'] = True
            outputs = model(**inputs)
            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return
            num_hidden_layers = (
                getattr(config , 'decoder_layers' , None)
                or getattr(config , 'num_decoder_layers' , None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config , 'num_kv_heads' , config.num_attention_heads)
            embed_dim = getattr(config , 'd_model' , config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads
            past_kv = outputs['past_key_values']
            self.assertEqual(len(past_kv) , num_hidden_layers)
            batch_size , seq_length = inputs['input_ids'].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]) , 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase ):
    '''simple docstring'''
    @slow
    def test_lm_generate_falcon( self ):
        tokenizer = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b')
        model = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b')
        model.eval()
        model.to(torch_device)
        inputs = tokenizer('My favorite food is' , return_tensors='pt').to(torch_device)
        EXPECTED_OUTPUT = (
            'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
        )
        output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str , EXPECTED_OUTPUT)
    @slow
    def test_lm_generation_big_models( self ):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer('My favorite food is' , return_tensors='pt').to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs , do_sample=False , max_new_tokens=4)
            model.generate(**inputs , do_sample=True , max_new_tokens=4)
            model.generate(**inputs , num_beams=2 , max_new_tokens=4)
    @slow
    def test_lm_generation_use_cache( self ):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer('My favorite food is' , return_tensors='pt').to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=False)
                outputs_cache = model.generate(**inputs , do_sample=False , max_new_tokens=20 , use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
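# Note on the last test (commentary, not from the original file): greedy decoding is a
# pure function of the prompt, so recomputing attention over the whole prefix
# (use_cache=False) and reusing cached key/value tensors (use_cache=True) must produce
# identical token ids; any difference points at a bug in the cache layout conversion.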
| 366 |
'''simple docstring'''
from PIL import Image
def change_brightness(img , level ) -> Image:
    def brightness(c ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
    return img.point(brightness )
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
    bright_img.save("image_data/lena_brightness.png", format="png")
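# Note: algebraically 128 + level + (c - 128) reduces to c + level; the midpoint form
# just makes the relation to other 128-centred point operations explicit.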
| 366 | 1 |
def solution(limit = 1000000 ):
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    # Euler's product formula: phi(n) = n * prod over the primes p dividing n of (1 - 1/p)
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
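# Worked check on a small limit (illustrative): for limit = 10 the sieve leaves
# {2, 3, 5, 7}; phi = [0, 1, 1, 2, 2, 4, 2, 6, 4, 6, 4], so sum(phi[2:]) = 31 --
# the number of reduced proper fractions with denominator <= 10 (Project Euler 72).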
if __name__ == "__main__":
print(f"{solution() = }")
| 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main():
    img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
    print("Processing..." )
    new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
    for index, image in enumerate(new_images ):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = paths[index].split(os.sep )[-1].rsplit("." , 1 )[0]
        file_root = F"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(F"/{file_root}.jpg" , image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(F"Success {index+1}/{len(new_images )} with {file_name}" )
        annos_list = []
        for anno in new_annos[index]:
            obj = F"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj )
        with open(F"/{file_root}.txt" , "w" ) as outfile:
            outfile.write("\n".join(line for line in annos_list ) )
def get_dataset(label_dir , img_dir ):
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , "*.txt" ) ):
        label_name = label_file.split(os.sep )[-1].rsplit("." , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , F"{label_name}.jpg" )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n" ).split(" " )
            boxes.append(
                [
                    int(obj[0] ),
                    float(obj[1] ),
                    float(obj[2] ),
                    float(obj[3] ),
                    float(obj[4] ),
                ] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(img_list , anno_list , flip_type = 1 ):
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list ) ):
        new_annos = []
        path = img_list[idx]
        path_list.append(path )
        img_annos = anno_list[idx]
        img = cv2.imread(path )
        if flip_type == 1:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
        elif flip_type == 0:
            new_img = cv2.flip(img , flip_type )
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
        new_annos_lists.append(new_annos )
        new_imgs_list.append(new_img )
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char = 32 ):
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 1 | 1 |
'''simple docstring'''
from PIL import Image
def change_brightness(img , level ):
    '''simple docstring'''
    def brightness(c ) -> float:
        return 128 + level + (c - 128)
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)" )
    return img.point(brightness )
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
    bright_img.save("image_data/lena_brightness.png", format="png")
| 679 |
'''simple docstring'''
def fibonacci(n ):
    '''simple docstring'''
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index(n ):
    '''simple docstring'''
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution(n = 1000 ):
    '''simple docstring'''
    return fibonacci_digits_index(n )
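# Sanity check (Project Euler 25): the first Fibonacci number with 1000 digits is
# F(4782), so solution() should return 4782. Note the helper recomputes the whole
# sequence on every call, which keeps it simple but quadratic overall.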
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 679 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFResNetModelTester:
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = TFResNetModel(config=config )
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFResNetForImageClassification(config )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFResNetModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    has_attentions = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFResNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ResNetConfig , has_text_modality=False )
    def test_config( self ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        return
    @unittest.skip(reason='ResNet does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        """simple docstring"""
        pass
    @unittest.skip(reason='ResNet does not support input and output embeddings' )
    def test_model_common_attributes( self ):
        """simple docstring"""
        pass
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict['output_hidden_states'] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFResNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFResNetModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
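# Why image_size // 32 in create_and_check_model above: the standard ResNet stem
# downsamples by 4 (stride-2 convolution plus stride-2 pooling) and three of the four
# stages halve the resolution again, 4 * 2 * 2 * 2 = 32 -- so a 224x224 input yields
# 7x7 feature maps, and the tiny 32x32 test images yield 1x1 maps.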
| 700 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' )
    parser.add_argument(
        '--dataset_name', type=str, default='wikitext', help='Name of the training. Explore datasets at: hf.co/datasets.', )
    parser.add_argument(
        '--dataset_config', type=str, default='wikitext-103-raw-v1', help='Configuration name of the dataset.' )
    parser.add_argument(
        '--tokenizer_name_or_path', type=str, default='sayakpaul/unigram-tokenizer-wikitext', help='Tokenizer identifier. Can be a local filepath or a Hub identifier.', )
    parser.add_argument(
        '--shard_size', type=int, default=1000, help='Number of entries to go in a single shard.', )
    parser.add_argument('--split', type=str, default='train', choices=['train', 'test', 'validation'] )
    parser.add_argument(
        '--limit', default=None, type=int, help='Limit the number of shards (used for debugging).', )
    parser.add_argument(
        '--max_length', type=int, default=512, help='Maximum sequence length. For training on TPUs, it helps to have a maximum'
        ' sequence length that is a multiple of 8.', )
    parser.add_argument(
        '--output_dir', default='tf-tpu', type=str, help='Output directory where the TFRecord shards will be saved. If the'
        ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'
        ' shards will be directly saved to a Google Cloud Storage bucket.', )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer ):
    """simple docstring"""
    def fn(examples ):
        return tokenizer(examples['text'] )
    return fn
def get_serialized_examples(tokenized_data ):
    """simple docstring"""
    records = []
    for i in range(len(tokenized_data['input_ids'] ) ):
        features = {
            'input_ids': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['input_ids'][i] ) ),
            'attention_mask': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['attention_mask'][i] ) ),
        }
        features = tf.train.Features(feature=features )
        example = tf.train.Example(features=features )
        serialized = example.SerializeToString()
        records.append(serialized )
    return records
def main(args ):
    """simple docstring"""
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ), args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(F'''Limiting the dataset to {args.limit} entries.''' )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir, args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir, args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=['text'] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
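    # Worked example: with the default max_length of 512, a batch that concatenates to
    # 1,234 tokens gives total_length = (1234 // 512) * 512 = 1024, i.e. two full chunks,
    # and the trailing 210 tokens are dropped.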
    def group_texts(examples ):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset ), args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['input_ids'] )
        filename = os.path.join(split_dir, F'''dataset-{shard_count}-{records_containing}.tfrecord''' )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
        print('Wrote file {} containing {} records'.format(filename, records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(F'''split-{args.split}-records-count.txt''', 'w' ) as f:
        print(F'''Total {args.split} records: {total_records}''', file=f )
if __name__ == "__main__":
    args = parse_args()
    main(args)
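# Sketch for reading the shards back (illustrative; assumes the default max_length of 512
# and the file layout produced above -- adjust the feature lengths and paths to your run):
#
#     def decode_fn(serialized):
#         features = {
#             "input_ids": tf.io.FixedLenFeature([512], dtype=tf.int64),
#             "attention_mask": tf.io.FixedLenFeature([512], dtype=tf.int64),
#         }
#         return tf.io.parse_single_example(serialized, features)
#
#     ds = tf.data.TFRecordDataset(tf.io.gfile.glob("tf-tpu/train/*.tfrecord")).map(decode_fn)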
| 309 | 0 |
from cv2 import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img ):
    pixel_h , pixel_v = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(pixel_h ):
        for j in range(pixel_v ):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)
    # convert to its negative
    img = convert_to_negative(img)
    # show result image
    imshow("negative of original image", img)
    waitKey(0)
    destroyAllWindows()
| 699 |
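# The per-pixel loop above is easy to read but slow; for uint8 images the same
# negative can be computed in one vectorized NumPy step (a sketch, assuming a
# standard 8-bit BGR image as returned by cv2.imread).
import numpy as np


def convert_to_negative_fast(img: np.ndarray) -> np.ndarray:
    # For uint8 data, the negative of each channel value v is 255 - v.
    return 255 - img


print(convert_to_negative_fast(np.array([[[0, 128, 255]]], dtype=np.uint8)))
# [[[255 127   0]]]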
'''simple docstring'''
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase ( UpperCamelCase_ ):
def __init__( self : Union[str, Any] , a__ : int , a__ : List[Any]=768 ):
'''simple docstring'''
super().__init__(a__ )
lowerCAmelCase__ : int = proj_size
lowerCAmelCase__ : str = CLIPVisionModel(a__ )
lowerCAmelCase__ : int = PaintByExampleMapper(a__ )
lowerCAmelCase__ : Optional[int] = nn.LayerNorm(config.hidden_size )
lowerCAmelCase__ : Optional[Any] = nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
lowerCAmelCase__ : Dict = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def _A ( self : Tuple , a__ : int , a__ : Optional[Any]=False ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.model(pixel_values=a__ )
lowerCAmelCase__ : Any = clip_output.pooler_output
lowerCAmelCase__ : Union[str, Any] = self.mapper(latent_states[:, None] )
lowerCAmelCase__ : List[Any] = self.final_layer_norm(a__ )
lowerCAmelCase__ : Optional[int] = self.proj_out(a__ )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class lowerCAmelCase ( nn.Module ):
def __init__( self : Optional[Any] , a__ : Optional[Any] ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Optional[Any] = (config.num_hidden_layers + 1) // 5
lowerCAmelCase__ : str = config.hidden_size
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : int = nn.ModuleList(
[
BasicTransformerBlock(a__ , a__ , a__ , activation_fn="gelu" , attention_bias=a__ )
for _ in range(a__ )
] )
def _A ( self : int , a__ : Union[str, Any] ):
'''simple docstring'''
for block in self.blocks:
lowerCAmelCase__ : List[Any] = block(a__ )
return hidden_states
| 378 | 0 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given date, using the Doomsday algorithm."""
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 248 |
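# A quick sanity check of get_week_day above against dates with well-known
# weekdays (the first value matches the routine's upstream doctest).
print(get_week_day(2020, 10, 24))  # Saturday
print(get_week_day(2000, 1, 1))    # Saturday
print(get_week_day(1970, 9, 16))   # Wednesday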
"""simple docstring"""
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
_tqdm_active = True
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = os.getenv("""TRANSFORMERS_VERBOSITY""" , UpperCamelCase_ )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def _lowerCAmelCase ( ):
return __name__.split(""".""" )[0]
def _lowerCAmelCase ( ):
return logging.getLogger(_get_library_name() )
def _lowerCAmelCase ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
__SCREAMING_SNAKE_CASE = logging.StreamHandler() # Set sys.stderr as stream.
__SCREAMING_SNAKE_CASE = sys.stderr.flush
# Apply our default configuration to the library root logger.
__SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
__SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase ( ):
global _default_handler
with _lock:
if not _default_handler:
return
__SCREAMING_SNAKE_CASE = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
__SCREAMING_SNAKE_CASE = None
def _lowerCAmelCase ( ):
return log_levels
def _lowerCAmelCase ( UpperCamelCase_ = None ):
if name is None:
__SCREAMING_SNAKE_CASE = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(UpperCamelCase_ )
def _lowerCAmelCase ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def _lowerCAmelCase ( UpperCamelCase_ ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(UpperCamelCase_ )
def _lowerCAmelCase ( ):
return set_verbosity(UpperCamelCase_ )
def _lowerCAmelCase ( ):
return set_verbosity(UpperCamelCase_ )
def _lowerCAmelCase ( ):
return set_verbosity(UpperCamelCase_ )
def _lowerCAmelCase ( ):
return set_verbosity(UpperCamelCase_ )
def _lowerCAmelCase ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def _lowerCAmelCase ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def _lowerCAmelCase ( UpperCamelCase_ ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(UpperCamelCase_ )
def _lowerCAmelCase ( UpperCamelCase_ ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(UpperCamelCase_ )
def _lowerCAmelCase ( ):
_configure_library_root_logger()
__SCREAMING_SNAKE_CASE = False
def _lowerCAmelCase ( ):
_configure_library_root_logger()
__SCREAMING_SNAKE_CASE = True
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
__SCREAMING_SNAKE_CASE = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
handler.setFormatter(UpperCamelCase_ )
def _lowerCAmelCase ( ):
__SCREAMING_SNAKE_CASE = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(UpperCamelCase_ )
def _lowerCAmelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , UpperCamelCase_ )
if no_advisory_warnings:
return
self.warning(*UpperCamelCase_ , **UpperCamelCase_ )
__magic_name__ = warning_advice
@functools.lru_cache(UpperCamelCase_ )
def _lowerCAmelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ):
self.warning(*UpperCamelCase_ , **UpperCamelCase_ )
__magic_name__ = warning_once
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__): # pylint: disable=unused-argument
__SCREAMING_SNAKE_CASE = args[0] if args else None
def __iter__( self):
return iter(self._iterator)
def __getattr__( self , lowerCAmelCase__):
def empty_fn(*lowerCAmelCase__ , **lowerCAmelCase__): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self):
return self
def __exit__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__):
return
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __call__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
if _tqdm_active:
return tqdm_lib.tqdm(*lowerCAmelCase__ , **lowerCAmelCase__)
else:
return EmptyTqdm(*lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self , *lowerCAmelCase__ , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__magic_name__ = _tqdm_cls()
def _lowerCAmelCase ( ):
global _tqdm_active
return bool(_tqdm_active )
def _lowerCAmelCase ( ):
global _tqdm_active
__SCREAMING_SNAKE_CASE = True
hf_hub_utils.enable_progress_bars()
def _lowerCAmelCase ( ):
global _tqdm_active
__SCREAMING_SNAKE_CASE = False
hf_hub_utils.disable_progress_bars()
| 248 | 1 |
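# A short usage sketch for the helpers above. The snippet obfuscates the
# function names, but these are the corresponding public entry points as
# exposed by the upstream library under transformers.utils.logging.
from transformers.utils import logging

logging.set_verbosity_info()  # raise the library-wide level to INFO
logger = logging.get_logger("transformers.modeling_utils")
logger.info("This message is now emitted to stderr.")
logging.disable_progress_bar()  # silence the tqdm wrappers defined above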
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num via the Sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 27 |
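# Example run of prime_sieve above: only primes up to sqrt(25) are used for
# marking multiples, the remaining survivors are collected in the final sweep.
print(prime_sieve(25))  # [2, 3, 5, 7, 11, 13, 17, 19, 23]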
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
__A : List[Any] = "http://www.mocksite.com/file1.txt"
__A : List[Any] = "\"text\": [\"foo\", \"foo\"]"
__A : Dict = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"
class lowerCamelCase:
'''simple docstring'''
__magic_name__ = 200
__magic_name__ = {'Content-Length': '100'}
__magic_name__ = {}
def lowerCAmelCase__ ( self , **snake_case_ ):
return [bytes(snake_case_ , 'utf-8' )]
def __lowerCAmelCase( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
import requests
monkeypatch.setattr(_SCREAMING_SNAKE_CASE , 'request' , _SCREAMING_SNAKE_CASE )
_A = URL
if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = url
elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = [url]
elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = {'train': url}
_A = 'dummy'
_A = 'downloads'
_A = tmp_path
_A = DownloadConfig(
cache_dir=os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , use_etag=_SCREAMING_SNAKE_CASE , )
_A = DownloadManager(dataset_name=_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
_A = dl_manager.download(_SCREAMING_SNAKE_CASE )
_A = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = [downloaded_paths]
_A = [urls]
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert "train" in downloaded_paths.keys()
_A = downloaded_paths.values()
_A = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
_A = Path(_SCREAMING_SNAKE_CASE )
_A = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
_A = downloaded_path.read_text()
assert content == CONTENT
_A = downloaded_path.with_suffix('.json' )
assert metadata_downloaded_path.exists()
_A = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
_A = str(_SCREAMING_SNAKE_CASE )
if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = filename
elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = [filename]
elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = {'train': filename}
_A = 'dummy'
_A = xz_file.parent
_A = 'extracted'
_A = DownloadConfig(
cache_dir=_SCREAMING_SNAKE_CASE , use_etag=_SCREAMING_SNAKE_CASE , )
_A = DownloadManager(dataset_name=_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
_A = dl_manager.extract(_SCREAMING_SNAKE_CASE )
_A = paths
for extracted_paths in [extracted_paths]:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = [extracted_paths]
_A = [paths]
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert "train" in extracted_paths.keys()
_A = extracted_paths.values()
_A = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
assert extracted_path == dl_manager.extracted_paths[input_path]
_A = Path(_SCREAMING_SNAKE_CASE )
_A = extracted_path.parts
assert parts[-1] == hash_url_to_filename(_SCREAMING_SNAKE_CASE , etag=_SCREAMING_SNAKE_CASE )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
_A = extracted_path.read_text()
_A = text_file.read_text()
assert extracted_file_content == expected_file_content
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
assert path.endswith('.jsonl' )
for num_items, line in enumerate(_SCREAMING_SNAKE_CASE , start=1 ):
_A = json.loads(line.decode('utf-8' ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_A = request.getfixturevalue(_SCREAMING_SNAKE_CASE )
_A = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(_SCREAMING_SNAKE_CASE ) , start=1 ):
_test_jsonl(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_A = request.getfixturevalue(_SCREAMING_SNAKE_CASE )
_A = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(_SCREAMING_SNAKE_CASE ) , start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(_SCREAMING_SNAKE_CASE ) , start=1 ):
_test_jsonl(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert num_tar == 1
assert num_jsonl == 2
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
_A = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(_SCREAMING_SNAKE_CASE ) , start=1 ):
assert os.path.basename(_SCREAMING_SNAKE_CASE ) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 27 | 1 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ : Any = logging.get_logger(__name__)
UpperCamelCase_ : Any = {
"""snap-research/efficientformer-l1-300""": (
"""https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"""
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 497 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 497 | 1 |
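# Roughly what the filtering above does for
# `python utils/get_modified_files.py utils src tests examples`
# (the file names here are made up for illustration).
import re

joined_dirs = "|".join(["utils", "src", "tests", "examples"])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
modified_files = ["src/transformers/trainer.py", "docs/index.md", "tests/test_trainer.py"]
print([x for x in modified_files if regex.match(x)])
# ['src/transformers/trainer.py', 'tests/test_trainer.py']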
import math
import qiskit
def quantum_full_adder(
    input_a: int = 1, input_b: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_a, str)
        or isinstance(input_b, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_a) != input_a)
        or (math.floor(input_b) != input_b)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 684 |
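# Exhaustive classical check of quantum_full_adder above (assumes qiskit with
# the Aer simulator installed, matching the pre-1.0 API used in the snippet).
# Each counts key reads carry_out then sum, so a + b + c on two bits.
for a in (0, 1):
    for b in (0, 1):
        for c in (0, 1):
            counts = quantum_full_adder(a, b, c)
            assert max(counts, key=counts.get) == format(a + b + c, "02b")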
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    """Parse trailing --key value pairs into a dict of keyword arguments."""
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()


if __name__ == "__main__":
    main()
| 236 | 0 |
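# What parse_unknown_args above produces for trailing --key value pairs
# (a standalone re-statement of the same dict comprehension).
unknown_args = ["--num_proc", "4", "--cache_dir", "/tmp/hf"]
print({key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])})
# {'num_proc': '4', 'cache_dir': '/tmp/hf'}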
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 639 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = IFInpaintingPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
def _lowerCamelCase ( self : List[str]) -> Tuple:
"""simple docstring"""
return self._get_dummy_components()
def _lowerCamelCase ( self : Any , A : int , A : Dict=0) -> Tuple:
"""simple docstring"""
if str(A).startswith('mps'):
_UpperCAmelCase = torch.manual_seed(A)
else:
_UpperCAmelCase = torch.Generator(device=A).manual_seed(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A)).to(A)
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
def _lowerCamelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA')
def _lowerCamelCase ( self : List[str]) -> Any:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1)
def _lowerCamelCase ( self : Optional[int]) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)
def _lowerCamelCase ( self : str) -> List[str]:
"""simple docstring"""
self._test_save_load_local()
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 639 | 1 |
"""simple docstring"""
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('''ignore''', category=UserWarning, module='''torch.optim.lr_scheduler''')
class AcceleratedScheduler:
    """A scheduler wrapper that only steps the wrapped scheduler when gradients
    have actually been synchronized (i.e. not on skipped accumulation steps)."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
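# A hedged usage sketch: in practice this wrapper is created by accelerate's
# Accelerator.prepare, which also wraps the optimizer so that step_was_skipped
# exists. Everything below is standard PyTorch/accelerate.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
# `scheduler` is now an AcceleratedScheduler like the class defined above.
loss = torch.nn.functional.mse_loss(model(torch.randn(8, 4)), torch.randn(8, 1))
accelerator.backward(loss)
optimizer.step()
scheduler.step()  # steps the wrapped StepLR because gradients are synced
print(scheduler.get_last_lr())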
| 572 |
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __UpperCamelCase ( A ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
__magic_name__ ='''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class _A ( __UpperCamelCase ):
@staticmethod
def _a (SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=SCREAMING_SNAKE_CASE_ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F"Loading model {model_type}" )
UpperCamelCase__ = model_type
UpperCamelCase__ = tf_checkpoint
UpperCamelCase__ = pytorch_dump_output
UpperCamelCase__ = config
UpperCamelCase__ = finetuning_task_name
def _a (self ) -> Tuple:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCamelCase__ = self._tf_checkpoint
UpperCamelCase__ = ''''''
else:
UpperCamelCase__ = self._tf_checkpoint
UpperCamelCase__ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
SCREAMING_SNAKE_CASE_ , self._config , self._pytorch_dump_output , SCREAMING_SNAKE_CASE_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 415 | 0 |
def binary_and(a: int, b: int) -> str:
    """Return the bitwise AND of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 715 |
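# Example values for binary_and above: both operands are zero-padded to the
# same width before the character-wise AND.
print(binary_and(25, 32))  # 0b000000
print(binary_and(37, 50))  # 0b100000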
def solution():
    """Project Euler 19: how many Sundays fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000)?"""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1_901
    sundays = 0
    while year < 2_001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2_001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
| 458 | 0 |
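# Cross-check of the hand-rolled calendar walk above using the standard
# library; Project Euler 19 expects 171.
import calendar

print(sum(
    1
    for year in range(1901, 2001)
    for month in range(1, 13)
    if calendar.weekday(year, month, 1) == calendar.SUNDAY
))  # 171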
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase = {
'''vocab_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json''',
},
'''merges_file''': {
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''Salesforce/codegen-350M-mono''': (
'''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json'''
),
},
}
lowercase = {
'''Salesforce/codegen-350M-mono''': 2_0_4_8,
}
class __A( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE = CodeGenTokenizer
def __init__( self : str , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : Any=None , __UpperCamelCase : Optional[Any]="<|endoftext|>" , __UpperCamelCase : Optional[Any]="<|endoftext|>" , __UpperCamelCase : Optional[Any]="<|endoftext|>" , __UpperCamelCase : str=False , **__UpperCamelCase : List[Any] , ):
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if kwargs.pop("""add_bos_token""" , SCREAMING_SNAKE_CASE__ ):
lowerCamelCase_ = kwargs.pop("""name_or_path""" , """""" )
raise ValueError(
"""Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token."""
"""Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n"""
F'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'''
F'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'''
"""This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."""
""" so that the fast tokenizer works correctly.""" )
lowerCamelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
lowerCamelCase_ = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop("""type""" ) )
lowerCamelCase_ = add_prefix_space
lowerCamelCase_ = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
lowerCamelCase_ = add_prefix_space
def lowercase__ ( self : Optional[int] , *__UpperCamelCase : List[str] , **__UpperCamelCase : int ):
lowerCamelCase_ = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def lowercase__ ( self : Optional[int] , *__UpperCamelCase : str , **__UpperCamelCase : Union[str, Any] ):
lowerCamelCase_ = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ):
lowerCamelCase_ = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def lowercase__ ( self : Optional[int] , __UpperCamelCase : Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"] , __UpperCamelCase : bool = False , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[List[str]] = None , **__UpperCamelCase : Any , ):
lowerCamelCase_ = super().decode(
token_ids=SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
if truncate_before_pattern is not None and len(SCREAMING_SNAKE_CASE__ ) > 0:
lowerCamelCase_ = self.truncate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return decoded_text
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Any ):
def find_re(__UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict ):
lowerCamelCase_ = pattern.search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return m.start() if m else -1
lowerCamelCase_ = [re.compile(SCREAMING_SNAKE_CASE__ , re.MULTILINE ) for pattern in truncate_before_pattern]
lowerCamelCase_ = list(re.finditer("""^print""" , SCREAMING_SNAKE_CASE__ , re.MULTILINE ) )
if len(SCREAMING_SNAKE_CASE__ ) > 1:
lowerCamelCase_ = completion[: prints[1].start()]
lowerCamelCase_ = list(re.finditer("""^def""" , SCREAMING_SNAKE_CASE__ , re.MULTILINE ) )
if len(SCREAMING_SNAKE_CASE__ ) > 1:
lowerCamelCase_ = completion[: defs[1].start()]
lowerCamelCase_ = 0
lowerCamelCase_ = [
pos for pos in [find_re(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for terminal in terminals] if pos != -1
]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
return completion[: min(SCREAMING_SNAKE_CASE__ )]
else:
return completion
| 272 |
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1
    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None
        if backend is not None:
            line_index += 1
            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]
    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")
        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 305 | 0 |
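# What find_backend above extracts from init lines (using the regexes as
# restored above); multiple backends are sorted and joined with "_and_".
print(find_backend("    if not is_torch_available():"))  # torch
print(find_backend("    if not is_tf_available() and not is_vision_available():"))  # tf_and_vision
print(find_backend("_import_structure = {"))  # None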
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __snake_case :
def __init__( self , _A , _A=13 , _A=7 , _A=True , _A=True , _A=True , _A=99 , _A=32 , _A=5 , _A=4 , _A=37 , _A="gelu" , _A=0.1 , _A=0.1 , _A=512 , _A=16 , _A=2 , _A=0.0_2 , _A=3 , _A=4 , _A=None , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = seq_length
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = use_token_type_ids
SCREAMING_SNAKE_CASE_ = use_labels
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = type_sequence_label_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = num_labels
SCREAMING_SNAKE_CASE_ = num_choices
SCREAMING_SNAKE_CASE_ = scope
SCREAMING_SNAKE_CASE_ = self.vocab_size - 1
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
SCREAMING_SNAKE_CASE_ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.num_choices)
SCREAMING_SNAKE_CASE_ = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self , _A , _A , _A , _A , *_A):
SCREAMING_SNAKE_CASE_ = OpenAIGPTModel(config=_A)
model.to(_A)
model.eval()
SCREAMING_SNAKE_CASE_ = model(_A , token_type_ids=_A , head_mask=_A)
SCREAMING_SNAKE_CASE_ = model(_A , token_type_ids=_A)
SCREAMING_SNAKE_CASE_ = model(_A)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def lowerCAmelCase__ ( self , _A , _A , _A , _A , *_A):
SCREAMING_SNAKE_CASE_ = OpenAIGPTLMHeadModel(_A)
model.to(_A)
model.eval()
SCREAMING_SNAKE_CASE_ = model(_A , token_type_ids=_A , labels=_A)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCAmelCase__ ( self , _A , _A , _A , _A , *_A):
SCREAMING_SNAKE_CASE_ = OpenAIGPTDoubleHeadsModel(_A)
model.to(_A)
model.eval()
SCREAMING_SNAKE_CASE_ = model(_A , token_type_ids=_A , labels=_A)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def lowerCAmelCase__ ( self , _A , _A , _A , _A , *_A):
SCREAMING_SNAKE_CASE_ = self.num_labels
SCREAMING_SNAKE_CASE_ = OpenAIGPTForSequenceClassification(_A)
model.to(_A)
model.eval()
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_ = model(_A , token_type_ids=_A , labels=_A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
(
SCREAMING_SNAKE_CASE_
) = config_and_inputs
SCREAMING_SNAKE_CASE_ = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
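
# Illustrative sketch (not part of the original tests): with `do_sample=False`, each step
# of `generate` above is plain greedy decoding. `logits_fn` is a hypothetical stand-in
# for a model forward pass that returns one score per vocabulary id.
def _greedy_decode(logits_fn, input_ids, max_new_tokens=5):
    tokens = list(input_ids)
    for _ in range(max_new_tokens):
        logits = logits_fn(tokens)  # scores over the vocabulary for the next token
        tokens.append(max(range(len(logits)), key=logits.__getitem__))
    return tokens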
| 708 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # (10 matches the length of the demo list built in main())
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))

    print("Initial List")
    print(*arr)

    arr = odd_even_transposition(arr)

    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
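
def odd_even_transposition_single_process(arr):
    # Reference sketch (illustrative, not part of the original file): the same odd-even
    # transposition sort without processes or pipes, handy as a correctness oracle for
    # the parallel version above.
    arr = list(arr)
    for phase in range(len(arr)):
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
    # e.g. odd_even_transposition_single_process([3, 1, 2]) == [1, 2, 3]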
| 620 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_encodec": [
        "ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EncodecConfig",
    ],
    "feature_extraction_encodec": ["EncodecFeatureExtractor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
        "ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EncodecModel",
        "EncodecPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
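
# Illustrative sketch of the lazy-import idea behind `_LazyModule`, written with plain
# module-level `__getattr__` (PEP 562). The module and attribute names are hypothetical,
# and this is not how transformers actually implements it, so it is left commented out:
#
#     import importlib
#
#     _structure = {"submodule": ["SomeClass"]}
#
#     def __getattr__(name):
#         for module_name, attrs in _structure.items():
#             if name in attrs:
#                 module = importlib.import_module(f".{module_name}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")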
| 633 |
"""simple docstring"""
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])

        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
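
def _example_checks() -> None:
    # Usage sketch (illustrative, not part of the original file): every opener must be
    # closed in the right order for is_balanced to return True.
    assert is_balanced("([]{})")
    assert not is_balanced("([)]")  # interleaved pairs are rejected
    assert not is_balanced("((")  # unclosed openers are rejected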
| 633 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
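
# Illustrative sketch of the `attribute_map` indirection declared above: reading a mapped
# name is redirected to the stored attribute. This is a simplified stand-in for what
# `PretrainedConfig` does internally, not its actual implementation, so it stays commented:
#
#     class AttributeMapDemo:
#         attribute_map = {"hidden_size": "d_model"}
#
#         def __init__(self, d_model=1024):
#             self.d_model = d_model
#
#         def __getattr__(self, name):  # only called when normal lookup fails
#             mapped = type(self).attribute_map.get(name)
#             if mapped is not None:
#                 return getattr(self, mapped)
#             raise AttributeError(name)
#
#     AttributeMapDemo().hidden_size  # -> 1024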
| 45 |
class matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
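
def _example_islands() -> int:
    # Usage sketch (illustrative, not part of the original file): 1 marks land, 0 water;
    # cells connect in all 8 directions, so this grid contains exactly two islands.
    grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
        [0, 0, 1, 1],
    ]
    return matrix(4, 4, grid).count_islands()  # == 2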
| 45 | 1 |
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
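
def _check_vrms_scaling() -> None:
    # Sanity sketch (illustrative, not part of the original file): since
    # v_rms = sqrt(3*R*T / M), quadrupling the temperature at a fixed molar
    # mass must exactly double the speed.
    v1 = rms_speed_of_molecule(300, 28)
    v2 = rms_speed_of_molecule(1200, 28)
    assert abs(v2 / v1 - 2.0) < 1e-9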
| 166 |
import argparse
import hashlib  # hashlib is only used inside the Test class
import struct


class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
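
def _check_against_hashlib() -> None:
    # Usage sketch (illustrative, not part of the original file): the pure-Python digest
    # should agree with hashlib for arbitrary byte strings, including multi-block inputs,
    # not just the single message in the test above.
    for message in (b"", b"abc", b"Test String" * 10):
        assert SHA1Hash(message).final_hash() == hashlib.sha1(message).hexdigest()  # noqa: S324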
| 166 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 459 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
def UpperCAmelCase__ (self : Tuple , A__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , A__ : Union[bool, str, PaddingStrategy] = True , A__ : Optional[int] = None , A__ : bool = False , A__ : Optional[int] = None , A__ : Optional[bool] = None , A__ : Optional[Union[str, TensorType]] = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
lowercase = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
f' to this method that includes {self.model_input_names[0]}, but you provided'
f' {list(processed_features.keys() )}' )
lowercase = processed_features[self.model_input_names[0]]
lowercase = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A__ ) == 0:
if return_attention_mask:
lowercase = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
lowercase = required_input[0]
if isinstance(A__ , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
lowercase = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A__ ):
lowercase = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A__ ):
lowercase = "tf"
elif is_torch_tensor(A__ ):
lowercase = "pt"
elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ):
lowercase = "np"
else:
raise ValueError(
f'type of {first_element} unknown: {type(A__ )}. '
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
lowercase = to_numpy(A__ )
else:
lowercase = [to_numpy(A__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
lowercase = self._get_padding_strategies(padding=A__ , max_length=A__ )
lowercase = processed_features[self.model_input_names[0]]
lowercase = len(A__ )
if not all(len(A__ ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
lowercase = []
for i in range(A__ ):
lowercase = {k: v[i] for k, v in processed_features.items()}
# truncation
lowercase = self._truncate(
A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , )
truncated_inputs.append(A__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
lowercase = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
lowercase = PaddingStrategy.MAX_LENGTH
lowercase = {}
for i in range(A__ ):
# padding
lowercase = self._pad(
truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , )
for key, value in outputs.items():
if key not in batch_outputs:
lowercase = []
if value.dtype is np.dtype(np.floataa ):
lowercase = value.astype(np.floataa )
batch_outputs[key].append(A__ )
return BatchFeature(A__ , tensor_type=A__ )
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
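
def _pad_batch_right(sequences, padding_value=0.0):
    # Illustrative sketch (not part of the original class) of what `_pad` does for
    # `padding_side == "right"`, reduced to plain numpy for 1-D float sequences;
    # all names here are local to the sketch.
    max_length = max(len(seq) for seq in sequences)
    padded = np.stack(
        [
            np.pad(np.asarray(seq, dtype=np.float32), (0, max_length - len(seq)), constant_values=padding_value)
            for seq in sequences
        ]
    )
    attention_mask = np.stack(
        [np.pad(np.ones(len(seq), dtype=np.int32), (0, max_length - len(seq))) for seq in sequences]
    )
    return padded, attention_mask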
| 459 | 1 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    # Multiplication only for 2x2 matrices
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
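
def _check_strassen_against_naive(matrix_a, matrix_b) -> None:
    # Verification sketch (illustrative, not part of the original file): for non-square
    # shapes like the example above, Strassen's result must match the naive O(n^3)
    # triple loop. (The early return for two square inputs in `strassen` looks like an
    # upstream quirk, so square inputs are not checked here.)
    rows, cols, inner = len(matrix_a), len(matrix_b[0]), len(matrix_b)
    naive = [
        [sum(matrix_a[i][k] * matrix_b[k][j] for k in range(inner)) for j in range(cols)]
        for i in range(rows)
    ]
    # strassen pads its inputs in place, so pass copies
    assert strassen([row[:] for row in matrix_a], [row[:] for row in matrix_b]) == naive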
| 632 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard


def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
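
def _read_zipped_member(zip_path="archive.zip", member="dataset.jsonl"):
    # Illustrative sketch (not one of the tests above): the URL-chaining syntax exercised
    # by test_fs_isfile can also be used directly with fsspec.open; both path values here
    # are hypothetical placeholders.
    with fsspec.open(f"zip://{member}::{zip_path}", "rt", encoding="utf-8") as f:
        return f.read()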
| 542 | 0 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4_096,
'''allenai/longformer-large-4096''': 4_096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4_096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4_096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class LongformerTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
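
def _toy_bpe(token="hugging"):
    # Illustrative sketch (not part of the original class) of the merge loop in `bpe`
    # above, run against a made-up merge table; real ranks come from the merges file.
    # This simplified version merges the best-ranked pair everywhere in the word until
    # no known pair remains.
    ranks = {("g", "g"): 0, ("i", "n"): 1}  # hypothetical merge ranks
    word = tuple(token)
    while True:
        candidates = [p for p in get_pairs(word) if p in ranks]
        if not candidates:
            return " ".join(word)  # e.g. "h u gg in g" for "hugging"
        first, second = min(candidates, key=ranks.get)
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)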
| 700 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a, b):
    b = b.T
    a2 = np.sum(np.square(a), axis=1)
    b2 = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = a2[:, None] - 2 * ab + b2[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)
class ImageGPTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
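
def _toy_color_quantize():
    # Illustrative sketch (not part of the original class): `color_quantize` maps every
    # RGB pixel to the index of its nearest cluster centroid; a hypothetical black/white
    # palette makes that visible.
    clusters = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float32)
    pixels = np.array([[[10, 10, 10], [250, 240, 245]]], dtype=np.float32)  # a 1x2 image
    return color_quantize(pixels, clusters)  # -> array([0, 1])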
| 170 | 0 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
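
def _playfair_roundtrip(message: str = "Hide the gold", key: str = "monarchy") -> bool:
    # Roundtrip sketch (illustrative, not part of the original file): decoding an encoded
    # message recovers the *prepared* plaintext (upper-cased, X-padded), not the raw
    # input string.
    return decode(encode(message, key), key) == prepare_input(message)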
| 421 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state


class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 1 , **lowercase__ ):
"""simple docstring"""
super().__init__(**lowercase__ )
SCREAMING_SNAKE_CASE_ : int = in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE_ : Any = max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE_ : str = (
TFRegNetShortCut(lowercase__ , stride=lowercase__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
SCREAMING_SNAKE_CASE_ : Tuple = [
TFRegNetConvLayer(lowercase__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
lowercase__ , stride=lowercase__ , groups=lowercase__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(lowercase__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(lowercase__ , kernel_size=1 , activation=lowercase__ , name="layer.3" ),
]
SCREAMING_SNAKE_CASE_ : Any = ACTaFN[config.hidden_act]
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = hidden_state
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ : str = layer_module(lowercase__ )
SCREAMING_SNAKE_CASE_ : Any = self.shortcut(lowercase__ )
hidden_state += residual
SCREAMING_SNAKE_CASE_ : Optional[int] = self.activation(lowercase__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = 2 , lowercase__ = 2 , **lowercase__ ):
"""simple docstring"""
super().__init__(**lowercase__ )
SCREAMING_SNAKE_CASE_ : Tuple = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
SCREAMING_SNAKE_CASE_ : str = [
# downsampling is done in the first layer with stride of 2
layer(lowercase__ , lowercase__ , lowercase__ , stride=lowercase__ , name="layers.0" ),
*[layer(lowercase__ , lowercase__ , lowercase__ , name=F"layers.{i+1}" ) for i in range(depth - 1 )],
]
def __lowerCamelCase ( self , lowercase__ ):
"""simple docstring"""
for layer_module in self.layers:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = layer_module(lowercase__ )
return hidden_state
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
def __init__( self , lowercase__ , **lowercase__ ):
"""simple docstring"""
super().__init__(**lowercase__ )
SCREAMING_SNAKE_CASE_ : str = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
lowercase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
SCREAMING_SNAKE_CASE_ : List[str] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(lowercase__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(lowercase__ , lowercase__ , lowercase__ , depth=lowercase__ , name=F"stages.{i+1}" ) )
def __lowerCamelCase ( self , lowercase__ , lowercase__ = False , lowercase__ = True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE_ : Any = stage_module(lowercase__ )
if output_hidden_states:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=lowercase__ , hidden_states=lowercase__ )
@keras_serializable
class SCREAMING_SNAKE_CASE__ ( tf.keras.layers.Layer ):
_A = RegNetConfig
def __init__( self , lowercase__ , **lowercase__ ):
"""simple docstring"""
super().__init__(**lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = config
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TFRegNetEmbeddings(lowercase__ , name="embedder" )
SCREAMING_SNAKE_CASE_ : List[Any] = TFRegNetEncoder(lowercase__ , name="encoder" )
SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=lowercase__ , name="pooler" )
@unpack_inputs
def __lowerCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = False , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : Any = self.embedder(lowercase__ , training=lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.encoder(
lowercase__ , output_hidden_states=lowercase__ , return_dict=lowercase__ , training=lowercase__ )
SCREAMING_SNAKE_CASE_ : Tuple = encoder_outputs[0]
SCREAMING_SNAKE_CASE_ : int = self.pooler(lowercase__ )
        # Change to NCHW output format to have uniformity in the modules
SCREAMING_SNAKE_CASE_ : Dict = tf.transpose(lowercase__ , perm=(0, 3, 1, 2) )
SCREAMING_SNAKE_CASE_ : Optional[int] = tf.transpose(lowercase__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
            hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase__ , pooler_output=lowercase__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
_A = RegNetConfig
_A = "regnet"
_A = "pixel_values"
@property
def __lowerCamelCase ( self ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
snake_case_ = R'\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
snake_case_ = R'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top.",_UpperCAmelCase,)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
def __init__( self , lowercase__ , *lowercase__ , **lowercase__ ):
"""simple docstring"""
super().__init__(lowercase__ , *lowercase__ , **lowercase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = TFRegNetMainLayer(lowercase__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __lowerCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__=False , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : Optional[int] = self.regnet(
pixel_values=lowercase__ , output_hidden_states=lowercase__ , return_dict=lowercase__ , training=lowercase__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ",_UpperCAmelCase,)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase,_UpperCAmelCase ):
def __init__( self , lowercase__ , *lowercase__ , **lowercase__ ):
"""simple docstring"""
super().__init__(lowercase__ , *lowercase__ , **lowercase__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = config.num_labels
SCREAMING_SNAKE_CASE_ : int = TFRegNetMainLayer(lowercase__ , name="regnet" )
# classification head
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(lowercase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __lowerCamelCase ( self , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__=False , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE_ : str = self.regnet(
lowercase__ , output_hidden_states=lowercase__ , return_dict=lowercase__ , training=lowercase__ )
SCREAMING_SNAKE_CASE_ : Dict = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE_ : Any = self.classifier[0](lowercase__ )
SCREAMING_SNAKE_CASE_ : str = self.classifier[1](lowercase__ )
SCREAMING_SNAKE_CASE_ : List[str] = None if labels is None else self.hf_compute_loss(labels=lowercase__ , logits=lowercase__ )
if not return_dict:
SCREAMING_SNAKE_CASE_ : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=lowercase__ , logits=lowercase__ , hidden_states=outputs.hidden_states )
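
# Hypothetical usage sketch of the model above; the checkpoint name and the
# expected (1, 1088, 7, 7) output shape come from this file's docstring
# constants, and the image path is a placeholder.
if __name__ == "__main__":
    from PIL import Image

    from transformers import AutoImageProcessor, TFRegNetModel

    image = Image.open("example.jpg")
    feature_extractor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")
    inputs = feature_extractor(images=image, return_tensors="tf")
    outputs = model(**inputs)
    print(outputs.last_hidden_state.shape)  # (1, 1088, 7, 7)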
| 421 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class a_ ( _snake_case , _snake_case ):
UpperCamelCase__ : Dict ="convnextv2"
def __init__( self :int , _lowercase :List[Any]=3 , _lowercase :Union[str, Any]=4 , _lowercase :int=4 , _lowercase :Tuple=None , _lowercase :Dict=None , _lowercase :Dict="gelu" , _lowercase :Union[str, Any]=0.02 , _lowercase :Optional[int]=1E-1_2 , _lowercase :Dict=0.0 , _lowercase :int=224 , _lowercase :Optional[int]=None , _lowercase :Dict=None , **_lowercase :int , ) -> Union[str, Any]:
super().__init__(**_lowercase)
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_stages
UpperCAmelCase_ = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
UpperCAmelCase_ = [3, 3, 9, 3] if depths is None else depths
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = image_size
UpperCAmelCase_ = ['''stem'''] + [f"stage{idx}" for idx in range(1 , len(self.depths) + 1)]
UpperCAmelCase_ , UpperCAmelCase_ = get_aligned_output_features_output_indices(
out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names)
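
# Hypothetical usage sketch; `ConvNextV2Config` is this class's published
# name, and passing no sizes falls back to the ConvNeXtV2-Tiny layout
# ([96, 192, 384, 768] hidden sizes, [3, 3, 9, 3] depths).
if __name__ == "__main__":
    config = ConvNextV2Config(depths=[2, 2, 6, 2], hidden_sizes=[40, 80, 160, 320])
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']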
| 712 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
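
# Sketch of real-world use of the decorator exercised above: the wrapped
# function must take `batch_size` as its first argument and is retried with
# the batch size halved whenever it raises a CUDA out-of-memory error.
# `make_dataloader` and `training_step` are hypothetical helpers.
@find_executable_batch_size(starting_batch_size=256)
def train(batch_size):
    loader = make_dataloader(batch_size)
    for batch in loader:
        training_step(batch)


# Calling train() then runs with the largest batch size that fits in memory.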
| 561 | 0 |
"""simple docstring"""
def solution(limit: int = 28_123) -> int:
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
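
# Added sanity check: for limit=30 the abundant numbers are
# {12, 18, 20, 24, 30}; only 24 = 12 + 12 and 30 = 12 + 18 are sums of two
# abundants, so the answer is 465 - 24 - 30 = 411.
if __name__ == "__main__":
    assert solution(30) == 411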
| 650 |
"""simple docstring"""
def kth_permutation(k: int, n: int) -> list:
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])

    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
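
# Worked example: permutations of range(4) in lexicographic order are
# indexed from k = 0, so k = 5 selects the sixth one.
if __name__ == "__main__":
    print(kth_permutation(5, 4))  # [0, 3, 2, 1]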
| 650 | 1 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = datasets.load_iris()
SCREAMING_SNAKE_CASE = iris.data[:, :2]
SCREAMING_SNAKE_CASE = (iris.target != 0) * 1
SCREAMING_SNAKE_CASE = 0.1
SCREAMING_SNAKE_CASE = logistic_reg(alpha, x, y, max_iterations=70000)
print('theta: ', theta) # printing the theta i.e our weights vector
def _lowerCamelCase ( __A : Any ) -> int:
return sigmoid_function(
np.dot(__A , __A ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = (x[:, 0].min(), x[:, 0].max())
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = (x[:, 1].min(), x[:, 1].max())
((SCREAMING_SNAKE_CASE) , (SCREAMING_SNAKE_CASE)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
SCREAMING_SNAKE_CASE = np.c_[xxa.ravel(), xxa.ravel()]
SCREAMING_SNAKE_CASE = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
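
# Added sanity check (a sketch on synthetic data, not part of the original
# notebook): on linearly separable clusters the learned weights should put
# almost every point on the correct side of the 0.5 probability threshold.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    x_demo = np.vstack([rng.normal(-2, 0.5, (50, 2)), rng.normal(2, 0.5, (50, 2))])
    y_demo = np.array([0] * 50 + [1] * 50)
    theta_demo = logistic_reg(0.1, x_demo, y_demo, max_iterations=300)
    probs_demo = sigmoid_function(np.dot(x_demo, theta_demo))
    print("demo accuracy:", ((probs_demo > 0.5) == y_demo).mean())  # ~1.0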
| 186 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
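    # Hypothetical invocation (all paths are placeholders):
    #   python convert_rembert_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./rembert/model.ckpt \
    #       --rembert_config_file ./rembert/config.json \
    #       --pytorch_dump_path ./rembert/pytorch_model.bin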
| 186 | 1 |
'''simple docstring'''
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
lowerCAmelCase_ : Tuple = int(input('Enter number: ').strip())
print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
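
# Known perfect numbers for a quick self-check: 6 = 1 + 2 + 3, then 28, 496.
if __name__ == "__main__":
    assert perfect(6) and perfect(28) and perfect(496)
    assert not perfect(12)  # proper divisors of 12 sum to 16, so it is abundant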
| 527 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
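
# Worked check on the sample graphs above (names match the restored
# definitions): the best E -> F route is E -> G -> F with cost 2 + 1 = 3,
# while E -> B -> C -> D -> F costs 4.
if __name__ == "__main__":
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3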
| 527 | 1 |
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : Dict = RoFormerTokenizer
A_ : Optional[Any] = RoFormerTokenizerFast
A_ : Optional[Any] = True
A_ : List[str] = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
super().setUp()
def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Any ):
'''simple docstring'''
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''', **_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict, **_lowerCamelCase : Optional[int] ):
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''', **_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = '''永和服装饰品有限公司,今天天气非常好'''
__A = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = self.get_tokenizer()
__A , __A = self.get_chinese_input_output_texts()
__A = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase, output_text.split() )
__A = tokens + [tokenizer.unk_token]
__A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
__A = self.get_rust_tokenizer()
__A , __A = self.get_chinese_input_output_texts()
__A = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase, output_text.split() )
__A = tokens + [tokenizer.unk_token]
__A = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ), _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
pass
| 215 |
"""simple docstring"""
def jaro_winkler(str1: str, str2: str) -> float:
    """simple docstring"""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('hello', 'world'))
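
# Classic reference pair: "martha" / "marhta" have 6 matching characters and
# one transposition (Jaro 0.944...), plus a shared 3-letter prefix, giving
# Jaro-Winkler 0.944 + 0.1 * 3 * (1 - 0.944) = 0.9611...
if __name__ == "__main__":
    print(jaro_winkler("martha", "marhta"))  # ~0.9611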
| 215 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
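
# Hypothetical downstream usage that this lazy-import table enables; the
# heavy torch-backed module is imported only when the symbol is first used:
#
#     from transformers import EncoderDecoderModel
#
#     model = EncoderDecoderModel.from_encoder_decoder_pretrained(
#         "bert-base-uncased", "bert-base-uncased"
#     )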
| 105 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """dpr"""
def __init__( self , lowercase=30522 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0 , lowercase="absolute" , lowercase = 0 , **lowercase , ):
super().__init__(pad_token_id=lowercase , **lowercase )
_lowerCamelCase : int = vocab_size
_lowerCamelCase : Optional[int] = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Tuple = hidden_act
_lowerCamelCase : int = intermediate_size
_lowerCamelCase : str = hidden_dropout_prob
_lowerCamelCase : Tuple = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = max_position_embeddings
_lowerCamelCase : str = type_vocab_size
_lowerCamelCase : int = initializer_range
_lowerCamelCase : Optional[int] = layer_norm_eps
_lowerCamelCase : Dict = projection_dim
        _lowerCamelCase : int = position_embedding_type

| 630 | 0 |
'''simple docstring'''
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _snake_case ( a_ ):
SCREAMING_SNAKE_CASE : List[Any] = (
'''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
'''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
'''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
)
SCREAMING_SNAKE_CASE : List[str] = '''CIDAS/clipseg-rd64-refined'''
SCREAMING_SNAKE_CASE : Tuple = '''image_segmenter'''
SCREAMING_SNAKE_CASE : Optional[int] = CLIPSegForImageSegmentation
SCREAMING_SNAKE_CASE : Dict = ['''image''', '''text''']
SCREAMING_SNAKE_CASE : List[Any] = ['''image''']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
'''simple docstring'''
requires_backends(self , ['vision'] )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return self.pre_processor(text=[label] , images=[image] , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
with torch.no_grad():
lowerCAmelCase = self.model(**_SCREAMING_SNAKE_CASE ).logits
return logits
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCAmelCase = outputs.cpu().detach().numpy()
lowerCAmelCase = 0
lowerCAmelCase = 1
return Image.fromarray((array * 2_55).astype(np.uinta ) )
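
# Hypothetical usage sketch; `ImageSegmentationTool` stands in for the class
# defined above (its published name) and the file paths are placeholders.
if __name__ == "__main__":
    tool = ImageSegmentationTool()
    mask = tool(image=Image.open("cat.png"), label="cat")
    mask.save("cat_mask.png")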
| 714 |
'''simple docstring'''
_UpperCamelCase : Optional[int] = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_UpperCamelCase : str = [{"type": "code", "content": INSTALL_CONTENT}]
_UpperCamelCase : Any = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 514 | 0 |
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
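
# Sketch of the subcommand contract the registrations in main() rely on
# (a hypothetical command, not part of the real CLI): register_subcommand
# attaches a sub-parser and sets `func` to a factory that the dispatcher
# calls as args.func(args) before invoking .run().
class HelloCommand:
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello", help="print a greeting")
        hello_parser.set_defaults(func=lambda args: HelloCommand())

    def run(self):
        print("hello from transformers-cli")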
if __name__ == "__main__":
main()
| 63 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ :List[str] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 150 | 0 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class snake_case_ ( _a , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase =RoCBertTokenizer
__UpperCAmelCase =None
__UpperCAmelCase =False
__UpperCAmelCase =True
__UpperCAmelCase =filter_non_english
def A__ ( self ):
super().setUp()
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__lowerCAmelCase = {}
__lowerCAmelCase = {}
for i, value in enumerate(_A ):
__lowerCAmelCase = i
__lowerCAmelCase = i
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(_A , _A , ensure_ascii=_A )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(_A , _A , ensure_ascii=_A )
def A__ ( self ):
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__lowerCAmelCase = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(_A , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_A ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_A ) , [5, 6, 2, 5, 7, 8] )
def A__ ( self ):
__lowerCAmelCase = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def A__ ( self ):
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A__ ( self ):
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def A__ ( self ):
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A__ ( self ):
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def A__ ( self ):
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A__ ( self ):
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A__ ( self ):
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def A__ ( self ):
__lowerCAmelCase = RoCBertBasicTokenizer(do_lower_case=_A , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def A__ ( self ):
__lowerCAmelCase = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowerCAmelCase = {}
for i, token in enumerate(_A ):
__lowerCAmelCase = i
__lowerCAmelCase = RoCBertWordpieceTokenizer(vocab=_A , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def A__ ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def A__ ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def A__ ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def A__ ( self ):
__lowerCAmelCase = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_A ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__lowerCAmelCase = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_A ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def A__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__lowerCAmelCase = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
__lowerCAmelCase = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
__lowerCAmelCase = tokenizer_r.do_lower_case if hasattr(_A , 'do_lower_case' ) else False
__lowerCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'Allen'),
((2_1, 2_3), '##NL'),
((2_3, 2_4), '##P'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), 'allen'),
((2_1, 2_3), '##nl'),
((2_3, 2_4), '##p'),
((2_5, 3_3), 'sentence'),
((3_3, 3_4), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def A__ ( self ):
__lowerCAmelCase = ['的', '人', '有']
__lowerCAmelCase = ''.join(_A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = True
__lowerCAmelCase = self.tokenizer_class.from_pretrained(_A , **_A )
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__lowerCAmelCase = tokenizer_p.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = tokenizer_r.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(_A )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
__lowerCAmelCase = False
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(_A , **_A )
__lowerCAmelCase = tokenizer_r.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = tokenizer_p.encode(_A , add_special_tokens=_A )
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(_A )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that only the first Chinese character is not preceded by "##".
__lowerCAmelCase = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(_A )
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
@slow
def A__ ( self ):
__lowerCAmelCase = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__lowerCAmelCase = tokenizer.encode('你好' , add_special_tokens=_A )
__lowerCAmelCase = tokenizer.encode('你是谁' , add_special_tokens=_A )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def A__ ( self ):
__lowerCAmelCase = self.get_tokenizers(do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
__lowerCAmelCase = '你好,你是谁'
__lowerCAmelCase = tokenizer.tokenize(_A )
__lowerCAmelCase = tokenizer.convert_tokens_to_ids(_A )
__lowerCAmelCase = tokenizer.convert_tokens_to_shape_ids(_A )
__lowerCAmelCase = tokenizer.convert_tokens_to_pronunciation_ids(_A )
__lowerCAmelCase = tokenizer.prepare_for_model(
_A , _A , _A , add_special_tokens=_A )
__lowerCAmelCase = tokenizer.encode_plus(_A , add_special_tokens=_A )
self.assertEqual(_A , _A )
| 102 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class snake_case_ :
"""simple docstring"""
def __init__( self , _A , _A=None , _A=None , _A=None , _A="resnet50" , _A=3 , _A=3_2 , _A=3 , _A=True , _A=True , ):
__lowerCAmelCase = parent
__lowerCAmelCase = out_indices if out_indices is not None else [4]
__lowerCAmelCase = stage_names
__lowerCAmelCase = out_features
__lowerCAmelCase = backbone
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = use_pretrained_backbone
__lowerCAmelCase = is_training
def A__ ( self ):
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = self.get_config()
return config, pixel_values
def A__ ( self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def A__ ( self , _A , _A ):
__lowerCAmelCase = TimmBackbone(config=_A )
model.to(_A )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(_A )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def A__ ( self ):
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase, __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class snake_case_ ( _a , _a , _a , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase =(TimmBackbone,) if is_torch_available() else ()
__UpperCAmelCase ={"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
__UpperCAmelCase =False
__UpperCAmelCase =False
__UpperCAmelCase =False
__UpperCAmelCase =False
def A__ ( self ):
__lowerCAmelCase = TimmBackboneModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=_A , has_text_modality=_A )
def A__ ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ):
__lowerCAmelCase = 'resnet18'
__lowerCAmelCase = 'microsoft/resnet-18'
__lowerCAmelCase = AutoBackbone.from_pretrained(_A , use_timm_backbone=_A )
__lowerCAmelCase = AutoBackbone.from_pretrained(_A )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowerCAmelCase = AutoBackbone.from_pretrained(_A , use_timm_backbone=_A , out_indices=[1, 2, 3] )
__lowerCAmelCase = AutoBackbone.from_pretrained(_A , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('TimmBackbone doesn\'t support feed forward chunking' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone initialization is managed on the timm side' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' )
def A__ ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def A__ ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def A__ ( self ):
pass
@unittest.skip('model weights aren\'t tied in TimmBackbone.' )
def A__ ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def A__ ( self ):
pass
@unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' )
def A__ ( self ):
pass
@unittest.skip('TimmBackbone doesn\'t support output_attentions.' )
def A__ ( self ):
pass
@unittest.skip('Safetensors is not supported by timm.' )
def A__ ( self ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A__ ( self ):
pass
def A__ ( self ):
__lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def A__ ( self ):
__lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = True
__lowerCAmelCase = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowerCAmelCase = self.all_model_classes[0]
__lowerCAmelCase = model_class(_A )
model.to(_A )
__lowerCAmelCase = self._prepare_for_class(_A , _A )
__lowerCAmelCase = model(**_A )
__lowerCAmelCase = outputs[0][-1]
# Encoder-/Decoder-only models
__lowerCAmelCase = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowerCAmelCase = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_A )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def A__ ( self ):
__lowerCAmelCase, __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(**_A )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowerCAmelCase = copy.deepcopy(_A )
__lowerCAmelCase = None
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(**_A )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowerCAmelCase = copy.deepcopy(_A )
__lowerCAmelCase = False
__lowerCAmelCase = model_class(_A )
model.to(_A )
model.eval()
__lowerCAmelCase = model(**_A )
| 102 | 1 |
"""simple docstring"""
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
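    # Example invocation (the script filename and paths are placeholders, not taken from this file):
    #   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt ./xglm-converted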
| 77 |
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Calculate the fixed monthly payment (EMI) on a loan."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
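# A quick worked example (illustrative values, result rounded to two decimals):
# borrowing 25_000 at 12% per annum over 3 years gives a monthly rate of 0.01 and 36 payments, so
#   equated_monthly_installments(25_000, 0.12, 3)  # -> ~830.36 per month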
if __name__ == "__main__":
import doctest
doctest.testmod() | 322 | 0 |
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    """Register a Formatter object using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
):
    """Register an unavailable Formatter using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
lowercase_ = ValueError("""PyTorch needs to be installed to be able to return PyTorch tensors.""")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
lowercase_ = ValueError("""Tensorflow needs to be installed to be able to return Tensorflow tensors.""")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
lowercase_ = ValueError("""JAX needs to be installed to be able to return JAX arrays.""")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return it unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
| 45 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    r"""Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
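# A hypothetical usage sketch (the checkpoint name and `audio_array` are assumptions, not taken from this file):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=audio_array, sampling_rate=48_000, return_tensors="pt")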
| 45 | 1 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
a_ = _symbol_database.Default()
a_ = _descriptor_pool.Default().AddSerializedFile(
B'\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
a_ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
a_ = None
a_ = b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
a_ = 45
a_ = 1_581
a_ = 1_517
a_ = 1_570
a_ = 1_584
a_ = 1_793
a_ = 1_795
a_ = 1_916
a_ = 1_864
a_ = 1_905
a_ = 1_919
a_ = 2_429
a_ = 2_208
a_ = 2_418
a_ = 2_323
a_ = 2_407
# @@protoc_insertion_point(module_scope)
| 685 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_CITATION = """\
"""
_DESCRIPTION = """
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
"""
_KWARGS_DESCRIPTION = """
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to 'cuda' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]
>>> results = perplexity.compute(model_id='gpt2',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
78.22
>>> print(round(results[\"perplexities\"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric(\"perplexity\")
>>> input_texts = datasets.load_dataset(\"wikitext\",
... \"wikitext-2-raw-v1\",
... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!='']
>>> results = perplexity.compute(model_id='gpt2',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
['perplexities', 'mean_perplexity']
>>> print(round(results[\"mean_perplexity\"], 2))
60.35
>>> print(round(results[\"perplexities\"][0], 2))
81.12
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "input_texts": datasets.Value("string"),
                }
            ),
            reference_urls=["https://huggingface.co/docs/transformers/perplexity"],
        )

    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either cpu, gpu or cuda."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 689 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase = "cpu" , __UpperCAmelCase = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
__lowerCamelCase = device
__lowerCamelCase = CLIPTokenizerFast.from_pretrained(lowerCAmelCase_ )
__lowerCamelCase = [0.48_145_466, 0.4_578_275, 0.40_821_073]
__lowerCamelCase = [0.26_862_954, 0.26_130_258, 0.27_577_711]
__lowerCamelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
__lowerCamelCase = torchvision.transforms.Resize(224 )
__lowerCamelCase = torchvision.transforms.CenterCrop(224 )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.resize(lowerCAmelCase_ )
__lowerCamelCase = self.center_crop(lowerCAmelCase_ )
__lowerCamelCase = self.normalize(lowerCAmelCase_ )
return images
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.tokenizer(text=lowerCAmelCase_ , **lowerCAmelCase_ )
__lowerCamelCase = self.preprocess_img(lowerCAmelCase_ )
__lowerCamelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class __lowerCAmelCase ( nn.Module ):
def __init__( self , __UpperCAmelCase=10 , __UpperCAmelCase=0.01 , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase="image" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase = None
__lowerCamelCase = device if device else get_device()
if vqgan:
__lowerCamelCase = vqgan
else:
__lowerCamelCase = load_vqgan(self.device , conf_path=lowerCAmelCase_ , ckpt_path=lowerCAmelCase_ )
self.vqgan.eval()
if clip:
__lowerCamelCase = clip
else:
__lowerCamelCase = CLIPModel.from_pretrained('''openai/clip-vit-base-patch32''' )
self.clip.to(self.device )
__lowerCamelCase = ProcessorGradientFlow(device=self.device )
__lowerCamelCase = iterations
__lowerCamelCase = lr
__lowerCamelCase = log
__lowerCamelCase = make_grid
__lowerCamelCase = return_val
__lowerCamelCase = quantize
__lowerCamelCase = self.vqgan.decoder.z_shape
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=5 , __UpperCAmelCase=True ):
'''simple docstring'''
__lowerCamelCase = []
if output_path is None:
__lowerCamelCase = '''./animation.gif'''
if input_path is None:
__lowerCamelCase = self.save_path
__lowerCamelCase = sorted(glob(input_path + '''/*''' ) )
if not len(lowerCAmelCase_ ):
raise ValueError(
'''No images found in save path, aborting (did you pass save_intermediate=True to the generate'''
''' function?)''' )
if len(lowerCAmelCase_ ) == 1:
print('''Only one image found in save path, (did you pass save_intermediate=True to the generate function?)''' )
__lowerCamelCase = total_duration / len(lowerCAmelCase_ )
__lowerCamelCase = [frame_duration] * len(lowerCAmelCase_ )
if extend_frames:
__lowerCamelCase = 1.5
__lowerCamelCase = 3
for file_name in paths:
if file_name.endswith('''.png''' ):
images.append(imageio.imread(lowerCAmelCase_ ) )
imageio.mimsave(lowerCAmelCase_ , lowerCAmelCase_ , duration=lowerCAmelCase_ )
print(F"""gif saved to {output_path}""" )
def lowerCamelCase ( self , __UpperCAmelCase=None , __UpperCAmelCase=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('''Input either path or tensor''' )
if img is not None:
raise NotImplementedError
__lowerCamelCase = preprocess(Image.open(lowerCAmelCase_ ) , target_image_size=256 ).to(self.device )
__lowerCamelCase = preprocess_vqgan(lowerCAmelCase_ )
__lowerCamelCase ,*__lowerCamelCase = self.vqgan.encode(lowerCAmelCase_ )
return z
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.latent.detach().requires_grad_()
__lowerCamelCase = base_latent + transform_vector
if self.quantize:
__lowerCamelCase ,*__lowerCamelCase = self.vqgan.quantize(lowerCAmelCase_ )
else:
__lowerCamelCase = trans_latent
return self.vqgan.decode(lowerCAmelCase_ )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
__lowerCamelCase = self.clip_preprocessor(text=lowerCAmelCase_ , images=lowerCAmelCase_ , return_tensors='''pt''' , padding=lowerCAmelCase_ )
__lowerCamelCase = self.clip(**lowerCAmelCase_ )
__lowerCamelCase = clip_outputs.logits_per_image
if weights is not None:
__lowerCamelCase = similarity_logits * weights
return similarity_logits.sum()
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self._get_clip_similarity(pos_prompts['''prompts'''] , lowerCAmelCase_ , weights=(1 / pos_prompts['''weights''']) )
if neg_prompts:
__lowerCamelCase = self._get_clip_similarity(neg_prompts['''prompts'''] , lowerCAmelCase_ , weights=neg_prompts['''weights'''] )
else:
__lowerCamelCase = torch.tensor([1] , device=self.device )
__lowerCamelCase = -torch.log(lowerCAmelCase_ ) + torch.log(lowerCAmelCase_ )
return loss
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = torch.randn_like(self.latent , requires_grad=lowerCAmelCase_ , device=self.device )
__lowerCamelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
__lowerCamelCase = self._add_vector(lowerCAmelCase_ )
__lowerCamelCase = loop_post_process(lowerCAmelCase_ )
__lowerCamelCase = self._get_CLIP_loss(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
print('''CLIP loss''' , lowerCAmelCase_ )
if self.log:
wandb.log({'''CLIP Loss''': clip_loss} )
clip_loss.backward(retain_graph=lowerCAmelCase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
wandb.init(reinit=lowerCAmelCase_ , project='''face-editor''' )
wandb.config.update({'''Positive Prompts''': positive_prompts} )
wandb.config.update({'''Negative Prompts''': negative_prompts} )
wandb.config.update({'''lr''': self.lr, '''iterations''': self.iterations} )
if image_path:
__lowerCamelCase = Image.open(lowerCAmelCase_ )
__lowerCamelCase = image.resize((256, 256) )
wandb.log('''Original Image''' , wandb.Image(lowerCAmelCase_ ) )
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
if not prompts:
return []
__lowerCamelCase = []
__lowerCamelCase = []
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowerCamelCase = [prompt.strip() for prompt in prompts.split('''|''' )]
for prompt in prompts:
if isinstance(lowerCAmelCase_ , (tuple, list) ):
__lowerCamelCase = prompt[0]
__lowerCamelCase = float(prompt[1] )
elif ":" in prompt:
__lowerCamelCase ,__lowerCamelCase = prompt.split(''':''' )
__lowerCamelCase = float(lowerCAmelCase_ )
else:
__lowerCamelCase = prompt
__lowerCamelCase = 1.0
processed_prompts.append(lowerCAmelCase_ )
weights.append(lowerCAmelCase_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowerCAmelCase_ , device=self.device ),
}
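    # Prompt formats accepted by the parsing above (weights default to 1.0):
    #   "a portrait | bright colors"            -> two prompts with equal weight
    #   "a portrait:2.0"                        -> a weighted "text:weight" string
    #   [("a portrait", 2.0), ("blurry", 0.5)]  -> explicit (text, weight) pairs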
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=None , ):
'''simple docstring'''
if image_path:
__lowerCamelCase = self._get_latent(lowerCAmelCase_ )
else:
__lowerCamelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
assert pos_prompts, "You must provide at least one positive prompt."
__lowerCamelCase = self.process_prompts(lowerCAmelCase_ )
__lowerCamelCase = self.process_prompts(lowerCAmelCase_ )
if save_final and save_path is None:
__lowerCamelCase = os.path.join('''./outputs/''' , '''_'''.join(pos_prompts['''prompts'''] ) )
if not os.path.exists(lowerCAmelCase_ ):
os.makedirs(lowerCAmelCase_ )
else:
__lowerCamelCase = save_path + '''_''' + get_timestamp()
os.makedirs(lowerCAmelCase_ )
__lowerCamelCase = save_path
__lowerCamelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('''Original Image''' )
show_pil(custom_to_pil(lowerCAmelCase_ ) )
__lowerCamelCase = loop_post_process(lowerCAmelCase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ):
if show_intermediate:
show_pil(lowerCAmelCase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}.png""" ) )
if self.log:
wandb.log({'''Image''': wandb.Image(lowerCAmelCase_ )} )
if show_final:
show_pil(lowerCAmelCase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"""iter_{iter:03d}_final.png""" ) )
| 719 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCAmelCase ( lowerCAmelCase__ ):
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = 8
# DPR tok
__lowerCamelCase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowerCamelCase = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__lowerCamelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
__lowerCamelCase = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
__lowerCamelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCamelCase = {'''unk_token''': '''<unk>'''}
__lowerCamelCase = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCamelCase = os.path.join(__UpperCAmelCase , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCAmelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCAmelCase ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowerCamelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def lowerCamelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_dataset()
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__lowerCamelCase = os.path.join(self.tmpdirname , '''dataset''' )
__lowerCamelCase = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase ) , )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__lowerCamelCase = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__lowerCamelCase = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__lowerCamelCase = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(__UpperCAmelCase , open(__UpperCAmelCase , '''wb''' ) )
__lowerCamelCase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__lowerCamelCase = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__lowerCamelCase = self.get_dummy_dataset()
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=__UpperCAmelCase )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(__UpperCAmelCase ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , __UpperCAmelCase )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = RagRetriever.from_pretrained(__UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever.retrieve(__UpperCAmelCase , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
import torch
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_canonical_hf_index_retriever()
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
__lowerCamelCase = retriever(
__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase , return_tensors='''pt''' , )
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.get_dpr_ctx_encoder_tokenizer()
__lowerCamelCase = 1
__lowerCamelCase = self.get_dummy_custom_hf_index_retriever(from_disk=__UpperCAmelCase )
retriever.set_ctx_encoder_tokenizer(__UpperCAmelCase )
__lowerCamelCase = [[5, 7], [10, 11]]
__lowerCamelCase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__lowerCamelCase = retriever(__UpperCAmelCase , __UpperCAmelCase , prefix=retriever.config.generator.prefix , n_docs=__UpperCAmelCase )
self.assertEqual(
len(__UpperCAmelCase ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , __UpperCAmelCase ) # check for doc token related keys in dictionary.
| 622 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 474 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
parser.add_argument(
'--pretrained_model_config' , type=_lowerCAmelCase , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , )
parser.add_argument(
'--tokenizer' , type=_lowerCAmelCase , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , )
parser.add_argument(
'--per_replica_batch_size' , type=_lowerCAmelCase , default=8 , help='Batch size per TPU core.' , )
parser.add_argument(
'--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , )
parser.add_argument(
'--tpu_name' , type=_lowerCAmelCase , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , )
parser.add_argument(
'--tpu_zone' , type=_lowerCAmelCase , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , )
parser.add_argument(
'--gcp_project' , type=_lowerCAmelCase , help='Google cloud project name. Only used for non-Colab TPU nodes.' )
parser.add_argument(
'--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , )
parser.add_argument(
'--train_dataset' , type=_lowerCAmelCase , help='Path to training dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--shuffle_buffer_size' , type=_lowerCAmelCase , default=2**18 , help='Size of the shuffle buffer (in samples)' , )
parser.add_argument(
'--eval_dataset' , type=_lowerCAmelCase , help='Path to evaluation dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--num_epochs' , type=_lowerCAmelCase , default=1 , help='Number of epochs to train for.' , )
parser.add_argument(
'--learning_rate' , type=_lowerCAmelCase , default=1e-4 , help='Learning rate to use for training.' , )
parser.add_argument(
'--weight_decay_rate' , type=_lowerCAmelCase , default=1e-3 , help='Weight decay rate to use for training.' , )
parser.add_argument(
'--max_length' , type=_lowerCAmelCase , default=512 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , )
parser.add_argument(
'--mlm_probability' , type=_lowerCAmelCase , default=0.15 , help='Fraction of tokens to mask during training.' , )
parser.add_argument('--output_dir' , type=_lowerCAmelCase , required=_lowerCAmelCase , help='Path to save model checkpoints to.' )
parser.add_argument('--hub_model_id' , type=_lowerCAmelCase , help='Model ID to upload to on the Hugging Face Hub.' )
    args = parser.parse_args()
return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
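# For example (illustrative filename matching the regex above):
#   "train-00012-5120.tfrecord" contributes 5_120 samples to the total.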
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
lowerCamelCase = parse_args()
main(args)
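# A minimal launch sketch (illustration only -- the tokenizer name and bucket paths
# are placeholders, and the file name train_model.py is assumed, not confirmed here):
#
#   python train_model.py \
#       --tokenizer bert-base-uncased \
#       --pretrained_model_config bert-base-uncased \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --tpu_name local \
#       --output_dir ./mlm_checkpoints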
| 474 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for ConvNeXt V2 backbones."""

    model_type = "convnextv2"

    def __init__(self, num_channels=3, patch_size=4, num_stages=4, hidden_sizes=None, depths=None, hidden_act="gelu", initializer_range=0.02, layer_norm_eps=1e-12, drop_path_rate=0.0, image_size=224, out_features=None, out_indices=None, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [F"""stage{idx}""" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
| 720 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    """Configuration class for BlenderbotSmall encoder-decoder models."""

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast if False else OnnxSeqaSeqConfigWithPast):  # noqa: placeholder, replaced below
    pass


class BlenderbotSmallOnnxConfigImpl(OnnxSeqaSeqConfigWithPast):
    pass
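# Usage sketch for the BlenderbotSmall ONNX config above (the task names come from
# the branches in the class; this is an illustration, not output from a real export):
# >>> config = BlenderbotSmallConfig()
# >>> onnx_config = BlenderbotSmallOnnxConfig(config, task="default")
# >>> list(onnx_config.inputs)
# ['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask']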
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig(PretrainedConfig):
    """Configuration class for Swin2SR image super-resolution models."""

    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=64, patch_size=1, num_channels=3, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 16 |
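# Usage sketch for the Swin2SRConfig above (Swin2SRForImageSuperResolution is the
# usual companion model class; weights here would be randomly initialized):
# >>> config = Swin2SRConfig(upscale=4)
# >>> config.num_layers
# 6
# >>> from transformers import Swin2SRForImageSuperResolution
# >>> model = Swin2SRForImageSuperResolution(config)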
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['cfg'])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['model'])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase__ : Tuple = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowercase__ : Any = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 8 | 0 |
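# Example invocation of the conversion script above (paths are placeholders, and the
# script file name is assumed rather than taken from this repository):
#
#   python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted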
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    '''Configuration class for XGLM decoder-only models.'''

    model_type = 'xglm'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(self, vocab_size=256008, max_position_embeddings=2048, d_model=1024, ffn_dim=4096, num_layers=24, attention_heads=16, activation_function="gelu", dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, layerdrop=0.0, init_std=0.02, scale_embedding=True, use_cache=True, decoder_start_token_id=2, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
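# Usage sketch -- the defaults above describe the facebook/xglm-564M architecture
# referenced in the archive map:
# >>> config = XGLMConfig()
# >>> config.d_model, config.num_layers
# (1024, 24)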
| 720 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def b2mb(x):
    """Convert bytes to megabytes."""
    return int(x / 2**20)


# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased", n_train: int = 320, n_val: int = 160, ):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        'glue', 'mrpc', split={'train': f'train[:{n_train}]', 'validation': f'validation[:{n_val}]'})

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1
        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print('Memory before entering the train : {}'.format(b2mb(tracemalloc.begin)))
        accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used))
        accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked))
        accelerator.print(
            'Total Peak Memory consumed during the train (max): {}'.format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)))
        train_total_peak_memory[f'epoch-{epoch}'] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'peak_memory_utilization.json'), 'w') as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--peak_memory_upper_bound', type=float, default=None, help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.', )
    parser.add_argument(
        '--n_train', type=int, default=320, help='Number of training examples to use.', )
    parser.add_argument(
        '--n_val', type=int, default=160, help='Number of validation examples to use.', )
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
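# This script is intended to run through the Accelerate launcher so that the
# DeepSpeed plugin (and its optimizer/scheduler settings) is picked up; a sketch
# with placeholder file names:
#
#   accelerate launch --config_file deepspeed_config.yaml peak_memory_usage.py \
#       --model_name_or_path bert-base-cased \
#       --output_dir ./results \
#       --peak_memory_upper_bound 1500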
| 126 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
"""processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
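# With this pattern, importing the package stays cheap: the heavy torch/flax/tf
# modules registered above are only imported when one of the exported names is
# first accessed, e.g.:
# >>> from transformers import VisionTextDualEncoderModel  # triggers the lazy import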
| 676 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    '''Configuration class for Informer time-series forecasting models.'''

    model_type = 'informer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=None, scaling="mean", num_dynamic_real_features=0, num_static_real_features=0, num_static_categorical_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, d_model=64, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_attention_heads=2, decoder_attention_heads=2, encoder_layers=2, decoder_layers=2, is_encoder_decoder=True, activation_function="gelu", dropout=0.05, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, attention_type="prob", sampling_factor=5, distil=True, **kwargs, ):
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
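# Usage sketch for the InformerConfig above -- prediction_length is the one argument
# without a usable default:
# >>> config = InformerConfig(prediction_length=24)
# >>> config.context_length   # falls back to prediction_length when not given
# 24
# >>> config.lags_sequence
# [1, 2, 3, 4, 5, 6, 7]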
| 676 | 1 |
def actual_power(a: int, b: int) -> int:
    '''Compute a**b by recursive exponentiation-by-squaring.'''
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
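# A few spot checks (the -0.125 case exercises the negative-exponent branch):
# >>> power(2, 10)
# 1024
# >>> power(-2, -3)
# -0.125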
| 242 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 242 | 1 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}
class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR scene-text recognition."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs, )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
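# Usage sketch for the tokenizer above -- it splits text into single characters.
# The vocab file here is a hypothetical local copy; the real one ships with
# alibaba-damo/mgp-str-base:
# >>> tokenizer = MgpstrTokenizer(vocab_file="vocab.json")
# >>> tokenizer._tokenize("abc")
# ['a', 'b', 'c']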
| 53 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor(SequenceFeatureExtractor):
    """Log-mel spectrogram feature extractor for TVLT audio inputs."""

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(self, spectrogram_length=2048, num_channels=1, patch_size=[16, 16], feature_size=128, sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048, padding_value=0.0, **kwargs, ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs, )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="""slaney""", mel_scale="""slaney""", ).T

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, """hann"""), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="""dB""", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(self, raw_speech, return_tensors=None, return_attention_mask=True, sampling_rate=None, resample=False, mask_audio=False, **kwargs, ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    """This feature extractor is set to support sampling rate"""
                    F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    F' with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features])  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            data = {"""audio_values""": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
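# Usage sketch for the feature extractor above, with a synthetic waveform
# (random values stand in for real audio):
# >>> import numpy as np
# >>> extractor = TvltFeatureExtractor()
# >>> wav = np.random.randn(44100).astype(np.float32)  # one second at 44.1 kHz
# >>> out = extractor(wav, sampling_rate=44100, return_tensors="np")
# >>> sorted(out.keys())
# ['audio_mask', 'audio_values']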
| 457 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 247 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            """Number of removed nested coreferring mentions in the key """
            F'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}')
        logger.info(
            """Number of resulting singleton clusters in the key """
            F'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}')

    if not keep_singletons:
        logger.info(
            F'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '
            """files, respectively""")

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, fa = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += fa
            conll_subparts_num += 1
        output_scores.update({F'{name}/recall': recall, F'{name}/precision': precision, F'{name}/f1': fa})

        logger.info(
            name.ljust(10), F'Recall: {recall * 100:.2f}', F' Precision: {precision * 100:.2f}', F' F1: {fa * 100:.2f}', )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(F'CoNLL score: {conll:.2f}')
        output_scores.update({"""conll_score""": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("""#"""):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        allmetrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=allmetrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
| 247 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
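# Sketch of how this lazy structure behaves for a consumer (nothing below is part of
# this module; the config values are illustrative only):
#
#     from transformers import LiltConfig, LiltModel   # resolved through the _LazyModule above
#     config = LiltConfig()                            # modeling_lilt is only imported on first access
#     model = LiltModel(config)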
| 610 |
"""simple docstring"""
def decimal_to_fraction(decimal: float | str) -> tuple[int, int]:
    """Return a decimal number in its simplest fraction form, as (numerator, denominator)."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce the fraction with Euclid's algorithm for the greatest common divisor
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
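# Worked example: 6.25 has two fractional digits, so it becomes 625/100; Euclid's
# algorithm gives gcd(625, 100) = 25, and the reduced result is (25, 4).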
if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")  # raises ValueError: Please enter a valid number
| 610 | 1 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num, using the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False

        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
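# Example: prime_sieve(10) returns [2, 3, 5, 7]; the inner loop starts at start * start
# because smaller multiples were already crossed out by smaller primes.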
if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 703 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
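# The provider arguments used above mirror onnxruntime's own session API. A minimal
# sketch outside of diffusers (the model path is illustrative):
#
#     import onnxruntime as ort
#     session = ort.InferenceSession(
#         "model.onnx", providers=[("CUDAExecutionProvider", {"arena_extend_strategy": "kSameAsRequested"})]
#     )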
| 309 | 0 |
"""Convert an S3PRL UniSpeechSat downstream checkpoint into a Transformers checkpoint."""

import argparse

import torch

from transformers import (
    UniSpeechSatConfig,
    UniSpeechSatForAudioFrameClassification,
    UniSpeechSatForSequenceClassification,
    UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy the S3PRL downstream weights into the matching Transformers head."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
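# Example invocation (all paths below are placeholders, not shipped files):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name microsoft/unispeech-sat-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model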
| 5 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # A dataset of random length (at most max_length) that stops with probability p_stop after each item.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
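# Each draw continues with probability 1 - p_stop, so the stream length is roughly
# geometric with mean about 1 / p_stop (capped at max_length), which is what makes
# the shard-length checks below non-trivial.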
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
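# Resuming mid-epoch in user code follows the same pattern exercised above
# (a sketch; `train_dataloader` and `resume_step` are assumed to exist):
#
#     skipped_dataloader = skip_first_batches(train_dataloader, num_batches=resume_step)
#     for batch in skipped_dataloader:
#         ...  # picks up at batch `resume_step` of the interrupted epoch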
| 388 | 0 |
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search that appends each vertex after all of its descendants."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search on the reversed graph, collecting one strongly connected component."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the strongly connected components of the graph (Kosaraju's algorithm)."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
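# Quick self-check (a sketch added for illustration; vertex order inside each
# component depends on traversal order):
#
#     strongly_connected_components(test_graph_1)  # -> [[0, 1, 2], [3], [4]]
#     strongly_connected_components(test_graph_2)  # -> [[0, 2, 1], [3, 5, 4]]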
| 24 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
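# Minimal usage sketch (not part of the original file; BeitModel is the matching
# model class in transformers):
#
#     from transformers import BeitConfig, BeitModel
#     config = BeitConfig(image_size=384)   # override any default above
#     model = BeitModel(config)             # randomly initialised weights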
| 24 | 1 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__snake_case : List[str] = {"input_ids": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __snake_case  # bound by the fmt: off literal above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tokenizer = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tokenizer.lang_token_to_id, original_lang_token_to_id)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
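# End-to-end translation sketch built on the behaviour tested above (model download
# is assumed; M2M100ForConditionalGeneration is the matching model class):
#
#     from transformers import M2M100ForConditionalGeneration
#     model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#     tokenizer.src_lang = "en"
#     encoded = tokenizer("Hello world", return_tensors="pt")
#     generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
#     tokenizer.batch_decode(generated, skip_special_tokens=True)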
| 81 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UniPCMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UniPCMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "solver_type": "bh2",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Only build a default scheduler when the caller did not supply one; rebuilding it
        # unconditionally would silently discard the `scheduler` argument.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = UniPCMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
def UpperCAmelCase__ ( self : str ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def UpperCAmelCase__ ( self : int ):
self.check_over_configs(thresholding=__UpperCamelCase )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , solver_order=__UpperCamelCase , solver_type=__UpperCamelCase , )
def UpperCAmelCase__ ( self : int ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
    def test_solver_order_and_type(self):
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    sample = self.full_loop(
                        solver_order=order,
                        solver_type=solver_type,
                        prediction_type=prediction_type,
                    )
                    assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2464) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.1014) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
    def test_unique_timesteps(self, **config):
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(scheduler.config.num_train_timesteps)
            assert len(scheduler.timesteps.unique()) == scheduler.num_inference_steps
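# The tests above ultimately exercise the standard diffusers denoising loop. Below is a
# minimal, self-contained sketch of that loop for reference; it assumes only the public
# UniPCMultistepScheduler / UNet2DModel APIs used in this file, and the config values and
# shapes are illustrative placeholders rather than values taken from the tests.
def _unipc_sampling_sketch():
    import torch
    from diffusers import UNet2DModel, UniPCMultistepScheduler

    scheduler = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2)
    model = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
    sample = torch.randn(1, 3, 32, 32)

    scheduler.set_timesteps(10)
    for t in scheduler.timesteps:
        with torch.no_grad():
            residual = model(sample, t).sample  # predicted noise for this timestep
        sample = scheduler.step(residual, t, sample).prev_sample  # one UniPC solver step
    return sample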
| 684 | 0 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()
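# A hedged sketch of driving the same benchmark programmatically rather than via the CLI;
# the argument names follow TensorFlowBenchmarkArguments as imported above, but the model
# name and sizes are illustrative.
def _programmatic_benchmark_sketch():
    args = TensorFlowBenchmarkArguments(
        models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128]
    )
    benchmark = TensorFlowBenchmark(args=args)
    return benchmark.run()  # returns speed/memory results per model and input size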
if __name__ == "__main__":
main()
| 710 |
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None
def _UpperCAmelCase ( self : Any) ->str:
"""simple docstring"""
_lowercase = BeitImageProcessingTester(self)
@property
def _UpperCAmelCase ( self : Union[str, Any]) ->List[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _UpperCAmelCase ( self : Optional[int]) ->Any:
"""simple docstring"""
_lowercase = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowercase__ , """do_resize"""))
self.assertTrue(hasattr(lowercase__ , """size"""))
self.assertTrue(hasattr(lowercase__ , """do_center_crop"""))
self.assertTrue(hasattr(lowercase__ , """center_crop"""))
self.assertTrue(hasattr(lowercase__ , """do_normalize"""))
self.assertTrue(hasattr(lowercase__ , """image_mean"""))
self.assertTrue(hasattr(lowercase__ , """image_std"""))
def _UpperCAmelCase ( self : Optional[Any]) ->str:
"""simple docstring"""
_lowercase = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""height""": 20, """width""": 20})
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18})
self.assertEqual(image_processor.do_reduce_labels , lowercase__)
_lowercase = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=lowercase__)
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42})
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84})
self.assertEqual(image_processor.do_reduce_labels , lowercase__)
def _UpperCAmelCase ( self : Union[str, Any]) ->List[Any]:
"""simple docstring"""
pass
def _UpperCAmelCase ( self : List[str]) ->int:
"""simple docstring"""
_lowercase = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__)
for image in image_inputs:
self.assertIsInstance(lowercase__ , Image.Image)
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowercase = image_processing(lowercase__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _UpperCAmelCase ( self : str) ->int:
"""simple docstring"""
_lowercase = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , numpify=lowercase__)
for image in image_inputs:
self.assertIsInstance(lowercase__ , np.ndarray)
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowercase = image_processing(lowercase__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _UpperCAmelCase ( self : Dict) ->Union[str, Any]:
"""simple docstring"""
_lowercase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__)
for image in image_inputs:
self.assertIsInstance(lowercase__ , torch.Tensor)
# Test not batched input
_lowercase = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowercase = image_processing(lowercase__ , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _UpperCAmelCase ( self : Dict) ->Any:
"""simple docstring"""
_lowercase = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowercase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase__ , torchify=lowercase__)
_lowercase = []
for image in image_inputs:
self.assertIsInstance(lowercase__ , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
_lowercase = image_processing(image_inputs[0] , maps[0] , return_tensors="""pt""")
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long)
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 2_55)
# Test batched
_lowercase = image_processing(lowercase__ , lowercase__ , return_tensors="""pt""")
self.assertEqual(
encoding["""pixel_values"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long)
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 2_55)
# Test not batched input (PIL images)
_lowercase , _lowercase = prepare_semantic_single_inputs()
_lowercase = image_processing(lowercase__ , lowercase__ , return_tensors="""pt""")
self.assertEqual(
encoding["""pixel_values"""].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
1,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long)
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 2_55)
# Test batched input (PIL images)
_lowercase , _lowercase = prepare_semantic_batch_inputs()
_lowercase = image_processing(lowercase__ , lowercase__ , return_tensors="""pt""")
self.assertEqual(
encoding["""pixel_values"""].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(
encoding["""labels"""].shape , (
2,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
self.assertEqual(encoding["""labels"""].dtype , torch.long)
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 2_55)
def _UpperCAmelCase ( self : Dict) ->Optional[Any]:
"""simple docstring"""
_lowercase = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
_lowercase , _lowercase = prepare_semantic_single_inputs()
_lowercase = image_processing(lowercase__ , lowercase__ , return_tensors="""pt""")
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 1_50)
_lowercase = True
_lowercase = image_processing(lowercase__ , lowercase__ , return_tensors="""pt""")
self.assertTrue(encoding["""labels"""].min().item() >= 0)
self.assertTrue(encoding["""labels"""].max().item() <= 2_55)
| 572 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
SCREAMING_SNAKE_CASE : Union[str, Any] = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")
def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()

    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)
def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")
def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
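# A hedged sketch of driving the helpers above from user code. The accelerator, model and
# optimizer are placeholders; it assumes an FSDP-wrapped model prepared via accelerate and
# reuses save_fsdp_model / save_fsdp_optimizer exactly as defined in this file.
def _fsdp_checkpoint_sketch(accelerator, model, optimizer, ckpt_dir):
    fsdp_plugin = accelerator.state.fsdp_plugin
    # write model weights according to the configured StateDictType
    save_fsdp_model(fsdp_plugin, accelerator, model, ckpt_dir)
    # optimizer state goes through FSDP.optim_state_dict under the same policy
    save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, ckpt_dir)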
| 257 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
SCREAMING_SNAKE_CASE : str = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
'''simple docstring'''
lowercase_ :Optional[int] = argparse.ArgumentParser(description='''Training''' )
parser.add_argument('''--force''' , action='''store_true''' , help='''Overwrite dump_path if it already exists.''' )
parser.add_argument(
'''--dump_path''' , type=_a , required=_a , help='''The output directory (log, checkpoints, parameters, etc.)''' )
parser.add_argument(
'''--data_file''' , type=_a , required=_a , help='''The binarized file (tokenized + tokens_to_ids) and grouped by sequence.''' , )
parser.add_argument(
'''--student_type''' , type=_a , choices=['''distilbert''', '''roberta''', '''gpt2'''] , required=_a , help='''The student type (DistilBERT, RoBERTa).''' , )
parser.add_argument('''--student_config''' , type=_a , required=_a , help='''Path to the student configuration.''' )
parser.add_argument(
'''--student_pretrained_weights''' , default=_a , type=_a , help='''Load student initialization checkpoint.''' )
parser.add_argument(
'''--teacher_type''' , choices=['''bert''', '''roberta''', '''gpt2'''] , required=_a , help='''Teacher type (BERT, RoBERTa).''' )
parser.add_argument('''--teacher_name''' , type=_a , required=_a , help='''The teacher model.''' )
parser.add_argument('''--temperature''' , default=2.0 , type=_a , help='''Temperature for the softmax temperature.''' )
parser.add_argument(
'''--alpha_ce''' , default=0.5 , type=_a , help='''Linear weight for the distillation loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_mlm''' , default=0.0 , type=_a , help='''Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.''' , )
parser.add_argument('''--alpha_clm''' , default=0.5 , type=_a , help='''Linear weight for the CLM loss. Must be >=0.''' )
parser.add_argument('''--alpha_mse''' , default=0.0 , type=_a , help='''Linear weight of the MSE loss. Must be >=0.''' )
parser.add_argument(
'''--alpha_cos''' , default=0.0 , type=_a , help='''Linear weight of the cosine embedding loss. Must be >=0.''' )
parser.add_argument(
'''--mlm''' , action='''store_true''' , help='''The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.''' )
parser.add_argument(
'''--mlm_mask_prop''' , default=0.15 , type=_a , help='''Proportion of tokens for which we need to make a prediction.''' , )
parser.add_argument('''--word_mask''' , default=0.8 , type=_a , help='''Proportion of tokens to mask out.''' )
parser.add_argument('''--word_keep''' , default=0.1 , type=_a , help='''Proportion of tokens to keep.''' )
parser.add_argument('''--word_rand''' , default=0.1 , type=_a , help='''Proportion of tokens to randomly replace.''' )
parser.add_argument(
'''--mlm_smoothing''' , default=0.7 , type=_a , help='''Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).''' , )
parser.add_argument('''--token_counts''' , type=_a , help='''The token counts in the data_file for MLM.''' )
parser.add_argument(
'''--restrict_ce_to_mask''' , action='''store_true''' , help='''If true, compute the distillation loss only the [MLM] prediction distribution.''' , )
parser.add_argument(
'''--freeze_pos_embs''' , action='''store_true''' , help='''Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.''' , )
parser.add_argument(
'''--freeze_token_type_embds''' , action='''store_true''' , help='''Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.''' , )
parser.add_argument('''--n_epoch''' , type=_a , default=3 , help='''Number of pass on the whole dataset.''' )
parser.add_argument('''--batch_size''' , type=_a , default=5 , help='''Batch size (for each process).''' )
parser.add_argument(
'''--group_by_size''' , action='''store_false''' , help='''If true, group sequences that have similar length into the same batch. Default is true.''' , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_a , default=5_0 , help='''Gradient accumulation for larger training batches.''' , )
parser.add_argument('''--warmup_prop''' , default=0.05 , type=_a , help='''Linear warmup proportion.''' )
parser.add_argument('''--weight_decay''' , default=0.0 , type=_a , help='''Weight decay if we apply some.''' )
parser.add_argument('''--learning_rate''' , default=5E-4 , type=_a , help='''The initial learning rate for Adam.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-6 , type=_a , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , default=5.0 , type=_a , help='''Max gradient norm.''' )
parser.add_argument('''--initializer_range''' , default=0.02 , type=_a , help='''Random initialization range.''' )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=_a , default='''O1''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_gpu''' , type=_a , default=1 , help='''Number of GPUs in the node.''' )
parser.add_argument('''--local_rank''' , type=_a , default=-1 , help='''Distributed training - Local rank''' )
parser.add_argument('''--seed''' , type=_a , default=5_6 , help='''Random seed''' )
parser.add_argument('''--log_interval''' , type=_a , default=5_0_0 , help='''Tensorboard logging interval.''' )
parser.add_argument('''--checkpoint_interval''' , type=_a , default=4_0_0_0 , help='''Checkpoint interval.''' )
lowercase_ :Union[str, Any] = parser.parse_args()
sanity_checks(_a )
# ARGS #
init_gpu_params(_a )
set_seed(_a )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
''' itUse `--force` if you want to overwrite it''' )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(f"Param: {args}" )
with open(os.path.join(args.dump_path , '''parameters.json''' ) , '''w''' ) as f:
json.dump(vars(_a ) , _a , indent=4 )
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"Loading data from {args.data_file}" )
with open(args.data_file , '''rb''' ) as fp:
lowercase_ :Tuple = pickle.load(_a )
if args.mlm:
logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , '''rb''' ) as fp:
lowercase_ :List[Any] = pickle.load(_a )
lowercase_ :Tuple = np.maximum(_a , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
lowercase_ :List[Any] = 0.0 # do not predict special tokens
lowercase_ :Dict = torch.from_numpy(_a )
else:
lowercase_ :Tuple = None
lowercase_ :List[Any] = LmSeqsDataset(params=_a , data=_a )
logger.info('''Data loader created.''' )
# STUDENT #
logger.info(f"Loading student config from {args.student_config}" )
lowercase_ :Union[str, Any] = student_config_class.from_pretrained(args.student_config )
lowercase_ :int = True
if args.student_pretrained_weights is not None:
logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" )
lowercase_ :List[Any] = student_model_class.from_pretrained(args.student_pretrained_weights , config=_a )
else:
lowercase_ :Dict = student_model_class(_a )
if args.n_gpu > 0:
student.to(f"cuda:{args.local_rank}" )
logger.info('''Student loaded.''' )
# TEACHER #
lowercase_ :int = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_a )
if args.n_gpu > 0:
teacher.to(f"cuda:{args.local_rank}" )
logger.info(f"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(_a , _a )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(_a , _a )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
lowercase_ :Tuple = Distiller(
params=_a , dataset=_a , token_probs=_a , student=_a , teacher=_a )
distiller.train()
logger.info('''Let\'s go get some drinks.''' )
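# The Distiller invoked above combines several objectives; its core term is a
# temperature-scaled distillation loss between teacher and student logits. A minimal
# sketch of that term (hyperparameter values illustrative):
def _distillation_loss_sketch(student_logits, teacher_logits, temperature=2.0):
    import torch.nn.functional as F

    loss_ce = F.kl_div(
        F.log_softmax(student_logits / temperature, dim=-1),
        F.softmax(teacher_logits / temperature, dim=-1),
        reduction="batchmean",
    ) * (temperature**2)  # rescale so gradient magnitudes stay comparable across temperatures
    return loss_ce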
if __name__ == "__main__":
main()
| 257 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
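# Usage sketch (illustrative): with the lazy module installed above, a user-facing import
# like the following only triggers the heavy torch import on first attribute access:
#
#   from transformers import UperNetConfig, UperNetForSemanticSegmentation
#
#   config = UperNetConfig()
#   model = UperNetForSemanticSegmentation(config)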
| 291 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)
@property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self ) -> List[str]:
'''simple docstring'''
__snake_case :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """do_resize""" ) )
self.assertTrue(hasattr(a__ , """size_divisor""" ) )
self.assertTrue(hasattr(a__ , """resample""" ) )
self.assertTrue(hasattr(a__ , """do_rescale""" ) )
def __lowercase ( self ) -> str:
'''simple docstring'''
pass
def __lowercase ( self ) -> Dict:
'''simple docstring'''
__snake_case :int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__snake_case :Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __lowercase ( self ) -> Tuple:
'''simple docstring'''
__snake_case :int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__snake_case :int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __lowercase ( self ) -> Any:
'''simple docstring'''
__snake_case :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__snake_case :List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
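# The invariant these tests check, in one place: GLPN resizes (rounding down) so that both
# spatial dimensions are multiples of size_divisor. A hedged sketch with illustrative sizes:
def _glpn_divisor_sketch():
    import numpy as np
    from transformers import GLPNImageProcessor

    processor = GLPNImageProcessor(size_divisor=32)
    image = np.random.randint(0, 255, (3, 250, 333), dtype=np.uint8)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    assert pixel_values.shape[-1] % 32 == 0 and pixel_values.shape[-2] % 32 == 0
    return pixel_values.shape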
| 291 | 1 |
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCamelCase = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
"""simple docstring"""
    model_class = UNetaDModel
    main_input_name = "sample"
@property
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Dict = 4
A_ : List[str] = 3
A_ : List[str] = (32, 32)
A_ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case_ )
A_ : List[Any] = torch.tensor([10] ).to(snake_case_ )
return {"sample": noise, "timestep": time_step}
@property
def _snake_case ( self )->int:
'''simple docstring'''
return (3, 32, 32)
@property
def _snake_case ( self )->Tuple:
'''simple docstring'''
return (3, 32, 32)
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : List[str] = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
A_ : Dict = self.dummy_input
return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
"""simple docstring"""
    model_class = UNetaDModel
    main_input_name = "sample"
@property
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : str = 4
A_ : Optional[Any] = 4
A_ : Optional[int] = (32, 32)
A_ : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case_ )
A_ : str = torch.tensor([10] ).to(snake_case_ )
return {"sample": noise, "timestep": time_step}
@property
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
return (4, 32, 32)
@property
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
return (4, 32, 32)
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : int = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
A_ : int = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : List[str] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(snake_case_ )
A_ : List[str] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Union[str, Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=snake_case_ )
model.to(snake_case_ )
A_ : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''' , '''This test is supposed to run on GPU''' )
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Optional[Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' , output_loading_info=snake_case_ )
model_accelerate.to(snake_case_ )
model_accelerate.eval()
A_ : List[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
A_ : Any = noise.to(snake_case_ )
A_ : List[str] = torch.tensor([10] * noise.shape[0] ).to(snake_case_ )
A_ : Tuple = model_accelerate(snake_case_ , snake_case_ )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
A_ : int = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''' , output_loading_info=snake_case_ , low_cpu_mem_usage=snake_case_ )
model_normal_load.to(snake_case_ )
model_normal_load.eval()
A_ : Union[str, Any] = model_normal_load(snake_case_ , snake_case_ )["""sample"""]
assert torch_all_close(snake_case_ , snake_case_ , rtol=1e-3 )
def _snake_case ( self )->str:
'''simple docstring'''
A_ : str = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(snake_case_ )
A_ : Dict = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
A_ : Tuple = noise.to(snake_case_ )
A_ : Optional[Any] = torch.tensor([10] * noise.shape[0] ).to(snake_case_ )
with torch.no_grad():
A_ : List[Any] = model(snake_case_ , snake_case_ ).sample
A_ : Union[str, Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
A_ : Dict = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(snake_case_ , snake_case_ , rtol=1e-3 ) )
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
"""simple docstring"""
    model_class = UNetaDModel
    main_input_name = "sample"
@property
def _snake_case ( self , _SCREAMING_SNAKE_CASE=(32, 32) )->Tuple:
'''simple docstring'''
A_ : Any = 4
A_ : int = 3
A_ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case_ )
A_ : List[Any] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=snake_case_ )
return {"sample": noise, "timestep": time_step}
@property
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
return (3, 32, 32)
@property
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
return (3, 32, 32)
def _snake_case ( self )->str:
'''simple docstring'''
A_ : List[Any] = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def _snake_case ( self )->Optional[Any]:
'''simple docstring'''
A_ : Optional[int] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' , output_loading_info=snake_case_ )
self.assertIsNotNone(snake_case_ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(snake_case_ )
A_ : Optional[Any] = self.dummy_input
A_ : Union[str, Any] = floats_tensor((4, 3) + (256, 256) ).to(snake_case_ )
A_ : Any = noise
A_ : str = model(**snake_case_ )
assert image is not None, "Make sure output is not None"
@slow
def _snake_case ( self )->int:
'''simple docstring'''
A_ : List[Any] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(snake_case_ )
A_ : Optional[int] = 4
A_ : Any = 3
A_ : Any = (256, 256)
A_ : Optional[Any] = torch.ones((batch_size, num_channels) + sizes ).to(snake_case_ )
A_ : Optional[int] = torch.tensor(batch_size * [1e-4] ).to(snake_case_ )
with torch.no_grad():
A_ : Any = model(snake_case_ , snake_case_ ).sample
A_ : int = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
A_ : Optional[Any] = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0980.7129, -2_0028.8535, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(snake_case_ , snake_case_ , rtol=1e-2 ) )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
A_ : Dict = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(snake_case_ )
A_ : Union[str, Any] = 4
A_ : Optional[Any] = 3
A_ : str = (32, 32)
A_ : int = torch.ones((batch_size, num_channels) + sizes ).to(snake_case_ )
A_ : Dict = torch.tensor(batch_size * [1e-4] ).to(snake_case_ )
with torch.no_grad():
A_ : Optional[Any] = model(snake_case_ , snake_case_ ).sample
A_ : Optional[int] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
A_ : Any = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(snake_case_ , snake_case_ , rtol=1e-2 ) )
def _snake_case ( self )->Union[str, Any]:
'''simple docstring'''
pass
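# A minimal end-to-end sketch of what these tests exercise: build a small UNet and run one
# noise-prediction forward pass. It uses the actual diffusers class name (UNet2DModel) and
# the same tiny config as the dummy tests above; shapes are illustrative.
def _unet2d_forward_sketch():
    import torch
    from diffusers import UNet2DModel

    model = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=2,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    noise = torch.randn(4, 3, 32, 32)
    timestep = torch.tensor([10])
    with torch.no_grad():
        prediction = model(noise, timestep).sample
    return prediction.shape  # (4, 3, 32, 32)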
| 590 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
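# A worked example: the matrix below is strictly diagonally dominant, so the Jacobi
# iteration converges; the initial guess and iteration count are illustrative.
def _jacobi_example() -> list[float]:
    coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
    constant = np.array([[2.0], [-6.0], [-4.0]])
    return jacobi_iteration_method(coefficient, constant, init_val=[0, 0, 0], iterations=50)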
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 384 | 0 |
"""simple docstring"""
a_ = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
a_ = ["a", "b", "c", "d", "e"]
def UpperCAmelCase_ ( __a : str , __a : Optional[Any] , __a : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Dict = start
# add current to visited
visited.append(UpperCamelCase__ )
_lowerCamelCase : Union[str, Any] = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
_lowerCamelCase : int = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# if all neighbors visited add current to sort
sort.append(UpperCamelCase__ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
for vertice in vertices:
if vertice not in visited:
_lowerCamelCase : int = topological_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# return sort
return sort
if __name__ == "__main__":
a_ = topological_sort("""a""", [], [])
print(sort)
| 703 |
"""simple docstring"""
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    """Implements tanh(x) = (2 / (1 + e^(-2x))) - 1, mapping inputs into (-1, 1)."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
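# Quick numerical sanity check against numpy's built-in tanh (illustrative values):
def _tanh_sanity_check() -> None:
    x = np.array([-2.0, 0.0, 2.0])
    assert np.allclose(tangent_hyperbolic(x), np.tanh(x))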
if __name__ == "__main__":
import doctest
doctest.testmod()
| 349 | 0 |
'''simple docstring'''
from __future__ import annotations
solution: list[list[list[int]]] = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    # check the row and column for an already-placed queen
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # check both upper diagonals
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 3 |
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
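# Quick sanity check against math.sin (angles given in degrees; tolerance illustrative):
def _sin_sanity_check() -> None:
    import math

    for angle in (0.0, 30.0, 90.0, 270.0):
        assert abs(sin(angle) - math.sin(math.radians(angle))) < 1e-8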
if __name__ == "__main__":
__import__("doctest").testmod()
| 468 | 0 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns the first n odd composite numbers which do not follow the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be > 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the smallest odd composite that is not a prime plus twice a square."""
    return compute_nums(1)[0]
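# For reference, the published answer to Project Euler problem 46 is 5777, i.e. the
# smallest odd composite that cannot be written as a prime plus twice a square:
def _check_solution() -> None:
    assert solution() == 5777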
if __name__ == "__main__":
    print(f"{solution() = }")
| 240 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int) -> None:
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales: Tuple[int, ...], in_channels: int, channels: int, align_corners: bool) -> None:
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs
class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing head, combining a Pyramid Pooling Module with an FPN.
    """

    def __init__(self, config, in_channels):
        super().__init__()
        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            l_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(l_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)
        return output

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]
        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])
        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)
        return output
class UperNetFCNHead(nn.Module):
    """Fully convolutional auxiliary head (FCN) applied to one intermediate feature map."""

    def __init__(self, config, in_index: int = 2, kernel_size: int = 3, dilation: Union[int, Tuple[int, int]] = 1) -> None:
        super().__init__()
        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation)
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
        # just take the relevant feature map
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights."""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        # Gradient checkpointing is toggled on the backbone (BackboneMixin in the upstream code).
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, SemanticSegmenterOutput]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        ) | 240 | 1 |
import re
import subprocess
import sys
# This script reports the modified .py files under the given top-level sub-dirs.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()

joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 0 |
"""A simple TCP client that receives a file from a server and saves it to disk."""
import socket


def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
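
# For illustration only (not part of the original module): a minimal matching
# server sketch, assuming the client above connects on port 12312 and reads
# until the socket closes. The file name and port are assumptions.
def serve_file(filename: str = "To_send", port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's greeting
    with open(filename, "rb") as in_file:
        chunk = in_file.read(1024)
        while chunk:
            conn.send(chunk)
            chunk = in_file.read(1024)
    conn.close()  # closing the connection ends the client's receive loop
    server.close()
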
if __name__ == "__main__":
main()
| 531 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
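# Only the configuration symbols are imported eagerly below; the torch-backed
# model classes are registered in _import_structure and resolved on first
# access through _LazyModule at the bottom of the file.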
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
        'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FocalNetForImageClassification',
        'FocalNetForMaskedImageModeling',
        'FocalNetBackbone',
        'FocalNetModel',
        'FocalNetPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 347 |
"""Time Series Transformer model configuration."""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
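# Minimal usage sketch (values are illustrative, not checkpoint defaults):
#
#   config = TimeSeriesTransformerConfig(prediction_length=24)
#   config.context_length   # falls back to prediction_length -> 24
#   config.feature_size     # input_size * len(lags_sequence) + static/time features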
| 347 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowercase_ ( _lowerCamelCase: int ) -> Dict:
'''simple docstring'''
__lowerCamelCase : List[str] = int(number**0.5 )
return number == sq * sq
def lowercase_ ( _lowerCamelCase: int , _lowerCamelCase: int , _lowerCamelCase: int , _lowerCamelCase: int , _lowerCamelCase: int , _lowerCamelCase: int ) -> Dict:
'''simple docstring'''
__lowerCamelCase : Dict = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
__lowerCamelCase : Union[str, Any] = x_den * y_den * z_den
__lowerCamelCase : List[Any] = gcd(snake_case__ , snake_case__ )
top //= hcf
bottom //= hcf
return top, bottom
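# Worked example: add_three(1, 3, 1, 6, 1, 2) == (1, 1),
# since 1/3 + 1/6 + 1/2 = 36/36 reduces to 1/1.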
def solution(order: int = 35) -> int:
    """Return numerator + denominator of the sum of all distinct s(x, y, z)."""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator
if __name__ == "__main__":
print(F"""{solution() = }""") | 646 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]])
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-2, rtol=1e-2)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        EXPECTED_MEAN = torch.tensor(
            [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, atol=1e-2, rtol=1e-2)
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text) | 91 | 0 |
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Return the torch.nn activation module named by `act_fn`."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
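# e.g. get_activation("swish") and get_activation("silu") both return nn.SiLU(),
# while an unsupported name such as "tanh" raises ValueError.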
| 708 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )
    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(self, config, states, actions, rewards, returns_to_go, timesteps, attention_mask):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)

        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length *3 as there are 3 modelities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
    test_generate_without_input_ids = False

    # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False
    test_torchscript = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        Autoregressively predict states, actions and returns for two timesteps and
        compare the predicted actions against fixed expected values.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_pred, action_pred, return_pred = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_pred.shape, actions.shape)
            self.assertTrue(torch.allclose(action_pred[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, _, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_pred[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
| 116 | 0 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
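# Typical usage sketch (the checkpoint id is shown for illustration):
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#   # -> BatchEncoding with input_ids, attention_mask and pixel_values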
| 636 |
"""Schur complement of a symmetric block matrix, with unit tests."""
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """
    Compute the Schur complement S = C - B^T A^-1 B of the symmetric block matrix
    [[A, B], [B^T, C]]. A precomputed (pseudo-)inverse of A may be supplied via
    `pseudo_inv`.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        raise ValueError(
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )

    if shape_b[1] != shape_c[1]:
        raise ValueError(
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )

    return mat_c - mat_b.T @ a_inv @ mat_b
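# The tests below rely on the determinant identity
#   det([[A, B], [B^T, C]]) == det(A) * det(S),
# where S is the Schur complement of A in the block matrix.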
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)
    def test_improper_a_b_dimensions(self):
        # A has fewer rows than B, so the row check must raise.
        a = np.array([[1, 2, 1], [2, 1, 2]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
    def test_improper_b_c_dimensions(self):
        # B and C disagree on the number of columns, so the column check must raise.
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 111 | 0 |
def excel_title_to_column(column_title: str) -> int:
    """Return the Excel column number for a title such as 'A', 'AB' or 'ZY'."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
answer += value
power += 1
index -= 1
return answer
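# Worked example: "AB" -> (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1
#                      = 2 + 26 = 28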
if __name__ == "__main__":
from doctest import testmod
testmod()
| 44 |
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
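# The DummyObject metaclass defers the failure: instantiating the placeholder
# class without the optional backends installed raises an informative error at
# use time instead of breaking the package import.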
| 44 | 1 |
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
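# Illustrative usage of the language-token plumbing above. The enclosing
# tokenizer class is defined earlier in the file (this fragment mirrors the
# M2M100-style multilingual API), so the instance below is an assumption:
#
#     tokenizer.src_lang = "en"             # runs set_src_lang_special_tokens
#     ids = tokenizer("Hello")["input_ids"]
#     # ids == [<en lang-token id>, ...subword ids..., tokenizer.eos_token_id]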
| 88 |
def partition(m: int) -> int:
    """Count the integer partitions of m with a memoized dynamic program."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]
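
# Worked example (added for illustration): partition(5) counts
# 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1 and 1+1+1+1+1, i.e. p(5) = 7.
assert partition(5) == 7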
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
            n = int(input("Enter a number: ").strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
            n = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 0 |
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
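
# Quick sanity check after conversion (illustrative; reuses the dump folder
# produced above rather than introducing a new API):
#
#     from transformers import GPT2Model
#     model = GPT2Model.from_pretrained(pytorch_dump_folder_path)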
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 206 |
def greatest_common_divisor(a: int, b: int) -> int:
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
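
# Worked example (added for illustration): Euclid's algorithm on (24, 40)
# walks the remainder chain (40, 24) -> (24, 16) -> (16, 8) -> (8, 0),
# so both implementations return 8.
assert greatest_common_divisor(24, 40) == 8
assert gcd_by_iterative(24, 40) == 8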
def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
| 206 | 1 |
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import get_test_info  # noqa: E402
from get_test_info import (  # noqa: E402
    get_model_to_test_mapping,
    get_model_to_tester_mapping,
    get_test_to_tester_mapping,
)


BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
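
# The suite above is collected by pytest/unittest from the repository root,
# e.g. (the exact test path is illustrative, not confirmed by this file):
#
#     python -m pytest tests/repo_utils/test_get_test_info.py -q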
| 161 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
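
# Minimal usage sketch (illustrative): the defaults above mirror the base
# checkpoint referenced in the archive map.
#
#     config = ViTMSNConfig()
#     config.hidden_size   # 768
#     config.patch_size    # 16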
| 161 | 1 |
def solution(numerator: int = 1, digit: int = 1_000) -> int:
    """
    Return the divisor in [numerator, digit] whose reciprocal has the longest
    recurring remainder cycle in long division (Project Euler 26 approach).
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
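
# Worked example (added for illustration): 1/7 = 0.(142857) has the longest
# recurring cycle (length 6) among 1/d for d <= 10, so the search returns 7.
assert solution(1, 10) == 7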
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181 |
def climb_stairs(number_of_steps: int) -> int:
    """Count the distinct ways to climb the stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
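
# Worked example (added for illustration): for 4 steps there are 5 ways
# (1+1+1+1, 1+1+2, 1+2+1, 2+1+1, 2+2) - the Fibonacci recurrence above.
assert climb_stairs(4) == 5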
if __name__ == "__main__":
import doctest
doctest.testmod()
| 181 | 1 |
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
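
# Illustrative input shape (values made up for the example): a results file
#
#     {"benchmarks/b1": {"load_time": {"new": 1.2, "old": 1.5, "diff": -0.3}}}
#
# renders a "### Benchmark: b1" section whose table row reads
# "1.200000 / 1.500000 (-0.300000)" under the "load_time" column.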
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
| 287 |
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n, using a sieve of Eratosthenes."""
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
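
# Worked example (added for illustration): the primes below 10 are 2, 3, 5, 7.
assert solution(10) == 17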
if __name__ == "__main__":
    print(f"{solution() = }")
| 287 | 1 |
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
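
# Typical usage sketch (the checkpoint id is illustrative):
#
#     from transformers import BridgeTowerProcessor
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     batch = processor(images=image, text="a picture of a cat", return_tensors="pt")
#     # batch now holds input_ids/attention_mask plus pixel_values (+ pixel_mask)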
| 712 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
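
# Worked example for rewrite_dict_keys (mirrors the comment above); the four
# special tokens must be present, since they are deleted and then restored:
assert rewrite_dict_keys(
    {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "tt@@": 6, "er": 7}
) == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "tt": 6, "er</w>": 7}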
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }
    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            # strip the fairseq "decoder." prefix for the output projection
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            # map the remaining fairseq decoder keys onto the BioGpt naming
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
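
# Example invocation (the script filename and paths are placeholders):
#
#     python convert_biogpt_checkpoint.py \
#         --biogpt_checkpoint_path /path/to/biogpt_checkpoint_dir \
#         --pytorch_dump_folder_path /path/to/output_dir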
| 661 | 0 |
from __future__ import annotations

import queue


class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable in practice; the loop above always returns


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
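
# Worked example of the traversal orders above (added for illustration)
# for the tiny tree  1 -> (left=2, right=3):
#   pre-order: 1,2,3   in-order: 2,1,3   post-order: 2,3,1   level order: 1,2,3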
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
    node = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
| 350 |
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __snake_case :
__a = None
def __a ( self: int ):
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
__lowerCamelCase = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , A_ )
def __a ( self: List[str] ):
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = os.path.join(A_ , """feat_extract.json""" )
feat_extract_first.to_json_file(A_ )
__lowerCamelCase = self.feature_extraction_class.from_json_file(A_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __a ( self: Optional[Any] ):
__lowerCamelCase = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase = feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__lowerCamelCase = self.feature_extraction_class.from_pretrained(A_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __a ( self: List[Any] ):
__lowerCamelCase = self.feature_extraction_class()
self.assertIsNotNone(A_ )
| 281 | 0 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05],
                    [-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00],
                    [2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
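
# Illustration of the ratio-based check above: with TOLERANCE = 1e-3, an
# expected value of 1.0e8 accepts outputs in roughly [0.999e8, 1.001e8];
# e.g. an output of 1.0005e8 gives a ratio of ~0.9995, which stays inside
# [1 - TOLERANCE, 1 + TOLERANCE].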
| 556 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __a ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
UpperCamelCase = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase = model(UpperCAmelCase_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , UpperCAmelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase_ , atol=1e-3 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCamelCase = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
UpperCamelCase = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase = model(UpperCAmelCase_ )["last_hidden_state"].detach()
self.assertEqual(output.shape , UpperCAmelCase_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , UpperCAmelCase_ , atol=1e-3 ) )
| 556 | 1 |