| Column | Type | Value range |
|---|---|---|
| code | string | lengths 82 to 54.1k |
| code_codestyle | int64 | 0 to 699 |
| style_context | string | lengths 111 to 35.6k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
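# The table above describes one row of a code-style classification dataset: two
# code strings ("code" and "style_context"), their style-cluster ids (0-699),
# and a binary label. A minimal sketch of how such a dump could be loaded for
# inspection -- the repository id "user/code-style-pairs" is a hypothetical
# placeholder, not the actual source of this dump:
#
#   from datasets import load_dataset
#
#   ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical id
#   print(ds.features)       # code, code_codestyle, style_context, ..., label
#   print(ds[0]["label"])    # 0 or 1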
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1  # disable EOS so exactly max_new_tokens tokens are generated

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Must be tested with an actual model -- the dummy models' tokenizers are not aligned with
        # their models, and `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
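# Outside of tests, the same pattern streams decoded text as it is generated.
# A minimal sketch (illustrative, not part of the original test module),
# reusing the tiny checkpoint from the tests above:
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer(["Hello"], return_tensors="pt")

    streamer = TextIteratorStreamer(tokenizer)
    generation_kwargs = {**inputs, "max_new_tokens": 10, "streamer": streamer}
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    for chunk in streamer:  # yields decoded text chunks as they are produced
        print(chunk, end="", flush=True)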
| 210 |
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """
    This class represents a vector of arbitrary size and the usual
    vector operations on it.
    """

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """
    This class represents a matrix of arbitrary size and the usual
    matrix operations on it.
    """

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:  # error case
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
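# A short, self-contained demo of the API above (added for illustration, not
# part of the original module); each expected value follows directly from the
# definitions:
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)                      # (5,7,9)
    print(v * w)                      # dot product: 4 + 10 + 18 = 32
    print(zero_vector(3))             # (0,0,0)

    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())            # 1*4 - 2*3 = -2
    print(m * Vector([1, 1]))         # row sums: (3,7)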
| 699 | 0 |
from math import sqrt


def is_prime(number: int) -> bool:
    """
    input: positive integer 'number'
    returns True if 'number' is prime, otherwise False.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are not primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"

    return status


def sieve_er(n: int) -> list[int]:
    """
    Sieve of Eratosthenes: returns all primes from 2 up to n.
    """
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))

    ans = []  # this list will be returned.

    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def get_prime_numbers(n: int) -> list[int]:
    """
    Returns a list of all primes between 2 and n (inclusive), using is_prime.
    """
    assert isinstance(n, int) and (n > 2), "'n' must be an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to n+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def prime_factorization(number: int) -> list[int]:
    """
    Returns the prime factorization of 'number' as a list of prime factors.
    """
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"

    ans = []  # this list will be returned by the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must be from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    """
    Returns the greatest prime factor of 'number'.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """
    Returns the smallest prime factor of 'number'.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and >= 0"

    ans = 0

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must be from type int"

    return ans


def is_even(number: int) -> bool:
    """
    Returns True if 'number' is even, otherwise False.
    """
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "compare must be from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """
    Returns True if 'number' is odd, otherwise False.
    """
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "compare must be from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list[int]:
    """
    Goldbach's assumption: returns two primes whose sum equals the even
    input 'number'.
    """
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must be an int, even and > 2"

    ans = []  # this list will be returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variables for the while-loops.
    i = 0
    j = None

    # exit variable, for breaking up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1

        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])

            j += 1

        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes, and their sum must equal 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """
    Greatest common divisor of 'number1' and 'number2' (Euclidean algorithm).
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """
    Least common multiple ("kgV") of 'number1' and 'number2'.
    """
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."

    ans = 1  # actual answer that will be returned.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'

    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must be from type int and positive"

    return ans


def get_prime(n: int) -> int:
    """
    Returns the n-th prime number (get_prime(0) == 2).
    """
    assert isinstance(n, int) and (n >= 0), "'n' must be a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1

        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must be a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    """
    Returns all primes strictly between the primes 'p_number_1' and 'p_number_2'.
    """
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returned.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"

    # 'ans' contains neither 'p_number_1' nor 'p_number_2' !
    return ans


def get_divisors(n: int) -> list[int]:
    """
    Returns all divisors of n (inclusive 1 and n itself).
    """
    assert isinstance(n, int) and (n >= 1), "'n' must be an int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """
    Returns True if 'number' is a perfect number, otherwise False.
    """
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must be an int and > 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    """
    Reduces the fraction numerator/denominator to lowest terms.
    """
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """
    Returns the factorial of 'n' (n!).
    """
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    ans = 1  # this will be returned.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """
    Returns the n-th Fibonacci number of this sequence (fib(0) == 1).
    """
    assert isinstance(n, int) and (n >= 0), "'n' must be an int and >= 0"

    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned

    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp

    return ans
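# Quick sanity checks for the helpers above (added for illustration, not part
# of the original module); each expected value follows from the definitions:
if __name__ == "__main__":
    print(is_prime(97))               # True
    print(sieve_er(20))               # [2, 3, 5, 7, 11, 13, 17, 19]
    print(prime_factorization(360))   # [2, 2, 2, 3, 3, 5]
    print(gcd(48, 36))                # 12
    print(kg_v(8, 12))                # 24
    print(goldbach(28))               # [5, 23]
    print(fib(10))                    # 89 (this sequence starts at fib(0) == 1)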
| 472 |
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )


class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
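# Usage sketch (added for illustration, not part of the original module): the
# defaults above reproduce the facebook/blenderbot_small-90M architecture, and
# the attribute_map lets generic names resolve to model-specific ones:
if __name__ == "__main__":
    config = BlenderbotSmallConfig()              # all defaults (d_model=512, ...)
    print(config.model_type)                      # "blenderbot-small"
    print(config.hidden_size == config.d_model)   # True, via attribute_map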
| 699 | 0 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->list[int]:
"""simple docstring"""
__lowercase : int = [0] * no_of_processes
__lowercase : Optional[int] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_lowerCamelCase ):
__lowercase : List[str] = burst_time[i]
__lowercase : Optional[int] = 0
__lowercase : int = 0
__lowercase : Optional[Any] = 9_99_99_99_99
__lowercase : Optional[Any] = 0
__lowercase : Optional[Any] = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_lowerCamelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__lowercase : Dict = remaining_time[j]
__lowercase : List[Any] = j
__lowercase : Dict = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__lowercase : Optional[Any] = remaining_time[short]
if minm == 0:
__lowercase : str = 9_99_99_99_99
if remaining_time[short] == 0:
complete += 1
__lowercase : Optional[int] = False
# Find finish time of current process
__lowercase : List[Any] = increment_time + 1
# Calculate waiting time
__lowercase : Optional[Any] = finish_time - arrival_time[short]
__lowercase : Optional[Any] = finar - burst_time[short]
if waiting_time[short] < 0:
__lowercase : int = 0
# Increment time
increment_time += 1
return waiting_time
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->list[int]:
"""simple docstring"""
__lowercase : Dict = [0] * no_of_processes
for i in range(_lowerCamelCase ):
__lowercase : Optional[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->None:
"""simple docstring"""
__lowercase : str = 0
__lowercase : List[str] = 0
for i in range(_lowerCamelCase ):
__lowercase : Tuple = total_waiting_time + waiting_time[i]
__lowercase : Union[str, Any] = total_turn_around_time + turn_around_time[i]
print(F'Average waiting time = {total_waiting_time / no_of_processes:.5f}' )
print("Average turn around time =", total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many process you want to analyze')
__A : Dict = int(input())
__A : Dict = [0] * no_of_processes
__A : List[str] = [0] * no_of_processes
__A : List[Any] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('Enter the arrival time and burst time for process:--' + str(i + 1))
__A, __A : int = map(int, input().split())
__A : Optional[Any] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__A : List[Any] = burst_time
__A : str = no_of_processes
__A : Tuple = waiting_time
__A : Union[str, Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__A : Optional[int] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
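# Worked example (added for illustration, not part of the original script):
# for arrival times [0, 1, 2] and burst times [3, 2, 1], ties in remaining
# time do not preempt, so the SRTF schedule is P1: 0-3, P3: 3-4, P2: 4-6, and:
#
#   calculate_waitingtime([0, 1, 2], [3, 2, 1], 3)     -> [0, 3, 1]
#   calculate_turnaroundtime([3, 2, 1], 3, [0, 3, 1])  -> [3, 5, 2]
#   average waiting time = (0 + 3 + 1) / 3 = 1.33333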
| 575 |
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
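# Usage sketch (added for illustration; the checkpoint name and the PIL image
# `image` are assumptions, not part of this module):
#
#   from transformers import BridgeTowerProcessor
#
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(image, "a photo of a cat", return_tensors="pt")
#   # encoding now holds input_ids/attention_mask plus pixel_values/pixel_mask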
| 699 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
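# Usage sketch (added for illustration, not part of the original module): the
# defaults above reproduce the microsoft/biogpt architecture, so a randomly
# initialized, biogpt-sized model could be built like this:
#
#   from transformers import BioGptConfig, BioGptForCausalLM
#
#   config = BioGptConfig()            # vocab_size=42384, hidden_size=1024, ...
#   model = BioGptForCausalLM(config)  # random weights, biogpt-sized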
| 454 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
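# Expected output for the call above (follows directly from the f-string):
#   5 * 1 = 5
#   5 * 2 = 10
#   ...
#   5 * 10 = 50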
| 699 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __lowercase ):
__UpperCAmelCase : Any = ['''image_processor''', '''tokenizer''']
__UpperCAmelCase : List[str] = '''BridgeTowerImageProcessor'''
__UpperCAmelCase : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = 0 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = True , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> int:
'''simple docstring'''
snake_case : Dict = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
snake_case : Tuple = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCamelCase ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : Any = self.tokenizer.model_input_names
snake_case : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 178 |
import math

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin


class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out


class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states,
            conditioning_emb=conditioning_emb,
            attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)


class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states


class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output


class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states


class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states


class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states


class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))


class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
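# Minimal shape check for the FiLM layer above (added for illustration;
# d_model=8 is an arbitrary small value chosen for the demo):
if __name__ == "__main__":
    film = T5FiLMLayer(in_features=8 * 4, out_features=8)
    x = torch.randn(2, 16, 8)           # (batch, seq, d_model)
    cond = torch.randn(2, 1, 8 * 4)     # (batch, 1, 4 * d_model)
    print(film(x, cond).shape)          # torch.Size([2, 16, 8])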
| 699 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
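# Quick-start sketch mirroring the integration test above: classify one image with the
# distilled DeiT checkpoint. Purely illustrative; it assumes network access to download
# "facebook/deit-base-distilled-patch16-224" and a local image at the given path.
#
#   processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
#   inputs = processor(images=Image.open("cat.png"), return_tensors="tf")
#   logits = model(**inputs).logits
#   print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])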
| 501 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # [-1, 1] -> [0, 255], NCHW -> NHWC numpy arrays for the encoder
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        # back to NCHW torch tensors in [-1, 1]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
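# Minimal usage sketch (illustrative): watermark a batch of generated images in [-1, 1].
# The class name follows the rewritten definition above, and the invisible-watermark
# package (`pip install invisible-watermark`) must be installed.
if __name__ == "__main__":
    watermarker = StableDiffusionXLWatermarker()
    batch = torch.rand(1, 3, 512, 512) * 2 - 1  # fake NCHW batch in [-1, 1]
    marked = watermarker.apply_watermark(batch)
    print(marked.shape, marked.min().item(), marked.max().item())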
| 699 | 0 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
            batch_size,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
# Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
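# Example invocations (illustrative; the script filename `cross_validation.py` is an
# assumption, and `accelerate config` should be run once before launching distributed):
#
#   python cross_validation.py --num_folds 5
#   accelerate launch cross_validation.py --num_folds 5
#   accelerate launch cross_validation.py --mixed_precision fp16 --num_folds 5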
| 632 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
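# Example invocation (illustrative; the script filename and the checkpoint/output paths
# below are placeholders, not paths from the original repository):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned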
| 699 | 0 |
def catalan_numbers(upper_limit: int) -> "list[int]":
    """
    Return the Catalan numbers C(0) through C(upper_limit), computed with
    dynamic programming.

    >>> catalan_numbers(5)
    [1, 1, 2, 5, 14, 42]
    """
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")

    import doctest

    doctest.testmod()
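# Cross-check sketch: the Catalan numbers also satisfy the closed form
# C(n) = binom(2n, n) / (n + 1), which gives an independent test of the DP above.
if __name__ == "__main__":
    from math import comb

    assert catalan_numbers(10) == [comb(2 * n, n) // (n + 1) for n in range(11)]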
| 509 |
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self):
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack (LIFO) backed by a singly linked list; the head is the top."""

    def __init__(self):
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        return "->".join([str(item) for item in self])

    def __len__(self):
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
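# Quick usage demo of the stack above (class name per the rewritten definition):
if __name__ == "__main__":
    stack: LinkedStack[int] = LinkedStack()
    for value in (1, 2, 3):
        stack.push(value)
    print(stack)         # 3->2->1
    print(stack.peek())  # 3
    print(stack.pop())   # 3
    print(len(stack))    # 2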
| 699 | 0 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
lowercase_ = importlib.util.find_spec("s3fs") is not None
if _has_safs:
from .safilesystem import SaFileSystem # noqa: F401
lowercase_ = [
compression.BzaFileSystem,
compression.GzipFileSystem,
compression.LzaFileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    # strip the scheme prefix (e.g. "s3://") from a remote dataset path, if present
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    # anything other than the local "file" protocol counts as remote
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str):
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    # Clear fsspec's async event-loop/thread references so that forked worker
    # processes don't hang on the parent's loop.
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
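# Usage sketch for the helpers above (illustrative; this module uses relative imports,
# so call these through the package rather than running this file directly):
#
#   import fsspec
#   extract_path_from_uri("s3://my-bucket/data/train")   # -> "my-bucket/data/train"
#   is_remote_filesystem(fsspec.filesystem("file"))      # -> False
#   is_remote_filesystem(fsspec.filesystem("memory"))    # -> True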
| 470 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """
    Return the optimal play value from a complete binary game tree whose leaves
    are `scores`; `is_max` tells whether the current player is the maximizer.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
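# Worked check: with the scores above, the tree evaluates as
# max( min(max(90,23), max(6,33)), min(max(21,65), max(123,34423)) )
# = max( min(90,33), min(65,34423) ) = max(33, 65) = 65.
if __name__ == "__main__":
    assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34_423], 3) == 65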
| 699 | 0 |
"""
Peter rolls nine four-sided dice, Colin rolls six six-sided dice; find the
probability, rounded to seven decimal places, that Peter's total beats Colin's.
(This is Project Euler problem 205.)
"""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Count, for every possible total, how many rolls of the dice produce it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Colin loses whenever his total is strictly smaller than Peter's
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
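# Sanity-check sketch: the published answer to Project Euler 205 is 0.5731441, so
# solution() should round to exactly that value (hard-coding the known result is an
# assumption you may prefer to remove).
if __name__ == "__main__":
    assert solution() == 0.5731441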
| 440 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results


if __name__ == "__main__":
    main()
| 699 | 0 |
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent

if __name__ == "__main__":
    print("Googling.....")
    url = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
    res = requests.get(url, headers={"UserAgent": UserAgent().random})
    # res.raise_for_status()
    with open("project1a.html", "wb") as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, "html.parser")
    links = list(soup.select(".eZt8xd"))[:5]

    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get("href"))
        else:
            webbrowser.open(f"https://google.com{link.get('href')}")
| 61 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
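# How the lazy init works, in miniature (illustrative sketch, not the actual transformers
# implementation): the module in sys.modules is replaced by an object whose __getattr__
# imports the real submodule on first attribute access, so the heavy torch/tf/flax
# imports above are deferred until something is actually used.
#
#   import importlib, types
#
#   class _LazyDemo(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._map = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
#
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._map[attr], self.__name__)
#           return getattr(module, attr)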
| 699 | 0 |
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    # rewrite PyTorch list-indexed module names ("blocks.0") as underscore-joined
    # Flax-style names ("blocks_0")
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
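# Quick check of rename_key (illustrative): list-indexed PyTorch module names are
# rewritten with underscores so they line up with Flax parameter naming, e.g.
#
#   rename_key("down_blocks.0.resnets.1.conv1.weight")
#   # -> "down_blocks_0.resnets_1.conv1.weight"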
| 210 |
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer exponentiation for a non-negative exponent b."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)  # compute the half power once instead of twice
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """a ** b for any integer exponent; negative exponents return a float."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
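# Complexity note, as a quick check: computing the half power once makes this O(log |b|)
# multiplications, versus O(|b|) when actual_power(a, b // 2) is evaluated twice per call.
if __name__ == "__main__":
    assert power(2, 10) == 1024
    assert power(2, -3) == 0.125
    assert power(-2, -3) == -0.125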
| 699 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    # Cartesian coordinates of atoms in angstroms; indexed by atom type as in
    # residue_constants.atom_types.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)


def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """The atom mask that would normally appear for each residue type."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 472 |
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers):
        raise ValueError('numbers must be an iterable of integers')
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
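# Illustrative, hand-verifiable checks for the routine above:
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6    # subarray [2, 3]
    assert max_product_subarray([-2, 0, -1]) == 0      # zero resets the product
    assert max_product_subarray([-2, 3, -4]) == 24     # two negatives cancel out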
| 699 | 0 |
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    """Score `item` by the number of characters that match `main_target` position-wise."""
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    """Slice both parents at a random point and swap their tails."""
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    """Randomly replace one gene of `child` with probability MUTATION_PROBABILITY."""
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    """Breed and mutate children from `parent_1` and randomly chosen mates."""
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    """Run the evolution loop until `target` is reproduced; return generation stats."""
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f'\nGeneration: {generation}'
                f'\nTotal Population:{total_population}'
                f'\nBest score: {population_score[0][1]}'
                f'\nBest string: {population_score[0][0]}')
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
print(
F"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"""
)
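# Quick, seed-independent sanity checks for the helpers above (a sketch; run
# them standalone, since the demo block right above performs a full, and
# potentially slow, evolution):
#
#   assert evaluate("hello", "hello") == ("hello", 5.0)   # perfect match scores len(target)
#   assert evaluate("hellx", "hello")[1] == 4.0           # one mismatching position
#   child_1, child_2 = crossover("aaaa", "bbbb")
#   assert sorted(child_1 + child_2) == sorted("aaaabbbb")  # crossover conserves genes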
| 575 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 | 0 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Naive recursion: count ordered ways to reach `target` using items of `array`."""
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down memoization of the naive recursion above."""
    def count_of_possible_combinations_with_dp_array(
            target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array)
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up tabulation: dp_array[i] counts ordered ways to form sum i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCAmelCase : str = 3
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : List[Any] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
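    # All three implementations above should agree; for array [1, 2, 5] and
    # target 5 the count of ordered combinations is 9 (verifiable by the
    # recurrence f(t) = f(t-1) + f(t-2) + f(t-5) with f(0) = 1).
    assert (
        combination_sum_iv(n, array, target)
        == combination_sum_iv_dp_array(n, array, target)
        == combination_sum_iv_bottom_up(n, array, target)
        == 9
    )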
| 454 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    '''Configuration class storing the hyper-parameters of a BioGPT model.'''
    model_type = 'biogpt'

    def __init__( self , vocab_size=42384 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1024 , initializer_range=0.02 , layer_norm_eps=1e-12 , scale_embedding=True , use_cache=True , layerdrop=0.0 , activation_dropout=0.0 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
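# Minimal usage sketch (illustrative only; the small sizes below are arbitrary
# and not tied to any released checkpoint):
if __name__ == "__main__":
    config = BioGptConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
    print(config.model_type, config.vocab_size, config.max_position_embeddings)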
| 699 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self) -> None:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
snake_case : Any = "this is a test"
snake_case : Union[str, Any] = "this is a test"
return input_text, output_text
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Tuple = "<pad>"
snake_case : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(UpperCamelCase__ ) , 3_0001 )
def lowerCamelCase ( self ) -> Tuple:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : List[str] = " \tHeLLo!how \n Are yoU? "
snake_case : List[str] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
snake_case : Optional[int] = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
snake_case : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : List[str] = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ )
snake_case : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def lowerCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : List[Any] = "I was born in 92000, and this is falsé."
snake_case : Any = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
snake_case : Tuple = DebertaVaTokenizer(UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Dict = DebertaVaTokenizerFast(UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
snake_case : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : Optional[Any] = "I was born in 92000, and this is falsé."
snake_case : str = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
snake_case : List[str] = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
snake_case : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : List[Any] = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
snake_case : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : List[str] = "I was born in 92000, and this is falsé."
snake_case : Optional[Any] = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
snake_case : Union[str, Any] = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
snake_case : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[Any] = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
snake_case : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : List[str] = "I was born in 92000, and this is falsé."
snake_case : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
snake_case : Optional[int] = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
snake_case : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[int] = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
snake_case : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
snake_case : str = " \tHeLLo!how \n Are yoU? "
snake_case : Union[str, Any] = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
snake_case : Union[str, Any] = DebertaVaTokenizer(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
snake_case : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Optional[Any] = DebertaVaTokenizerFast(UpperCamelCase__ , do_lower_case=UpperCamelCase__ , split_by_punct=UpperCamelCase__ )
snake_case : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : str = self.get_tokenizer()
snake_case : List[Any] = self.get_rust_tokenizer()
snake_case : Optional[int] = "I was born in 92000, and this is falsé."
snake_case : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
snake_case : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Any = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
snake_case : Optional[Any] = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Any = self.get_rust_tokenizer()
snake_case : Union[str, Any] = tokenizer.encode(UpperCamelCase__ )
snake_case : Optional[Any] = rust_tokenizer.encode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : Dict = "This is a test"
snake_case : List[Any] = [13, 1, 4398, 25, 21, 1289]
snake_case : Any = ["▁", "T", "his", "▁is", "▁a", "▁test"]
snake_case : Dict = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
snake_case : Optional[int] = DebertaVaTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
snake_case : Optional[int] = DebertaVaTokenizerFast(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
snake_case : Any = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : List[str] = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : int = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Any = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : List[str] = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Any = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
# fmt: off
snake_case : Optional[int] = "I was born in 92000, and this is falsé."
snake_case : str = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
snake_case : Tuple = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
snake_case : Any = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
snake_case : Dict = tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Dict = tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Any = tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : List[str] = rust_tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : List[Any] = rust_tokenizer.tokenize(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
snake_case : Dict = rust_tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = DebertaVaTokenizer(UpperCamelCase__ )
snake_case : Any = tokenizer.encode("sequence builders" )
snake_case : Dict = tokenizer.encode("multi-sequence build" )
snake_case : Any = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ )
snake_case : Dict = tokenizer.build_inputs_with_special_tokens(UpperCamelCase__ , UpperCamelCase__ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , UpperCamelCase__ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , UpperCamelCase__ , )
@slow
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case : int = {"input_ids": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase__ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 178 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print('A' + str(i), end=' ')
    else:
        print('(', end=' ')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(')', end=' ')


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('No. of Operation required: ' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
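    # Sanity check (the classic CLRS instance): these dimensions admit a
    # minimum of 15125 scalar multiplications.
    matrix, _ = matrix_chain_order([30, 35, 15, 5, 10, 20, 25])
    assert matrix[1][6] == 15125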
| 699 | 0 |
'''simple docstring'''
import os
def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 501 |
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
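    # Illustrative checks for the primality helper; inputs below 2 are outside
    # its intended use (the search above starts at 7).
    assert is_prime(7) and is_prime(97)
    assert not is_prime(49)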
| 699 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--performance_lower_bound', type=float, default=None, help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.', )
    parser.add_argument(
        '--num_epochs', type=int, default=3, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
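# Example invocation (a sketch; flag names mirror the argparse definitions above):
#
#   accelerate launch this_script.py \
#       --model_name_or_path bert-base-cased \
#       --num_epochs 3 \
#       --output_dir ./results \
#       --performance_lower_bound 0.8
#
# Under a DeepSpeed config that supplies its own optimizer/scheduler, the
# DummyOptim/DummyScheduler branches above are taken instead of AdamW and the
# linear warmup schedule.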
| 632 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use ImageGPTImageProcessor instead.', FutureWarning, )
        super().__init__(*args, **kwargs)
| 699 | 0 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt substring search."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For every prefix of `pattern`, the length of its longest proper border."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
__lowerCAmelCase : Optional[Any] = "abc1abc12"
__lowerCAmelCase : Any = "alskfjaldsabc1abc1abc12k23adsfabcabc"
__lowerCAmelCase : int = "alskfjaldsk23adsfabcabc"
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__lowerCAmelCase : Union[str, Any] = "ABABX"
__lowerCAmelCase : Tuple = "ABABZABABYABABX"
assert kmp(pattern, text)
# Test 3)
__lowerCAmelCase : Tuple = "AAAB"
__lowerCAmelCase : Optional[int] = "ABAAAAAB"
assert kmp(pattern, text)
# Test 4)
__lowerCAmelCase : Optional[Any] = "abcdabcy"
__lowerCAmelCase : Optional[Any] = "abcxabcdabxabcdabcdabcy"
assert kmp(pattern, text)
# Test 5)
__lowerCAmelCase : Optional[int] = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
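# Extra worked example (illustrative): each entry of the failure array is the
# length of the longest proper border of the corresponding prefix.
assert get_failure_array("ababab") == [0, 0, 1, 2, 3, 4]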
| 509 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "layer_norm" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(UpperCamelCase__ , UpperCamelCase__ )
else:
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = Attention(
query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
)
A = Attention(
query_dim=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , upcast_attention=UpperCamelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = FeedForward(UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn=UpperCamelCase__ , final_dropout=UpperCamelCase__ )
# let chunk size default to None
A = None
A = 0
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def UpperCamelCase ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(UpperCamelCase__ )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(UpperCamelCase__ , UpperCamelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase__ )
)
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(UpperCamelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = int(dim * mult )
A = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
A = GELU(UpperCamelCase__ , UpperCamelCase__ )
if activation_fn == "gelu-approximate":
A = GELU(UpperCamelCase__ , UpperCamelCase__ , approximate='tanh' )
elif activation_fn == "geglu":
A = GEGLU(UpperCamelCase__ , UpperCamelCase__ )
elif activation_fn == "geglu-approximate":
A = ApproximateGELU(UpperCamelCase__ , UpperCamelCase__ )
A = nn.ModuleList([] )
# project in
self.net.append(UpperCamelCase__ )
# project dropout
self.net.append(nn.Dropout(UpperCamelCase__ ) )
# project out
self.net.append(nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(UpperCamelCase__ ) )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int ):
for module in self.net:
A = module(UpperCamelCase__ )
return hidden_states
class GELU(nn.Module):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str = "none" ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
A = approximate
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int ):
A = self.proj(UpperCamelCase__ )
A = self.gelu(UpperCamelCase__ )
return hidden_states
class GEGLU(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , dim_out * 2 )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Tuple ):
if gate.device.type != "mps":
return F.gelu(UpperCamelCase__ )
# mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def UpperCamelCase ( self : str , UpperCamelCase__ : str ):
A , A = self.proj(UpperCamelCase__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(UpperCamelCase__ )
class ApproximateGELU(nn.Module):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
A = self.proj(UpperCamelCase__ )
return x * torch.sigmoid(1.702 * x )
class AdaLayerNorm(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ):
super().__init__()
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , embedding_dim * 2 )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ ) ) )
A , A = torch.chunk(UpperCamelCase__ , 2 )
A = self.norm(UpperCamelCase__ ) * (1 + scale) + shift
return x
class AdaLayerNormZero(nn.Module):
'''simple docstring'''
def __init__( self : str , UpperCamelCase__ : int , UpperCamelCase__ : List[str] ):
super().__init__()
A = CombinedTimestepLabelEmbeddings(UpperCamelCase__ , UpperCamelCase__ )
A = nn.SiLU()
A = nn.Linear(UpperCamelCase__ , 6 * embedding_dim , bias=UpperCamelCase__ )
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ , eps=1e-6 )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ):
A = self.linear(self.silu(self.emb(UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=UpperCamelCase__ ) ) )
A , A , A , A , A , A = emb.chunk(6 , dim=1 )
A = self.norm(UpperCamelCase__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : float = 1e-5 ):
super().__init__()
A = num_groups
A = eps
if act_fn is None:
A = None
else:
A = get_activation(UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , out_dim * 2 )
def UpperCamelCase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str ):
if self.act:
A = self.act(UpperCamelCase__ )
A = self.linear(UpperCamelCase__ )
A = emb[:, :, None, None]
A , A = emb.chunk(2 , dim=1 )
A = F.group_norm(UpperCamelCase__ , self.num_groups , eps=self.eps )
A = x * (1 + scale) + shift
return x
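# Minimal smoke test for the transformer block above (a sketch: the shapes are
# illustrative, and it assumes the remaining `A = ...` assignments inside the
# classes are restored to their `self.<attribute> = ...` form so the modules
# actually hold state).
if __name__ == "__main__":
    block = BasicTransformerBlock(dim=32, num_attention_heads=2, attention_head_dim=16)
    hidden_states = torch.randn(1, 8, 32)  # (batch, tokens, channels)
    print(block(hidden_states).shape)  # expected: torch.Size([1, 8, 32])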
| 699 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyVaaInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCamelCase_ : Any = KandinskyVaaInpaintPipeline
UpperCamelCase_ : str = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
UpperCamelCase_ : Union[str, Any] = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
UpperCamelCase_ : str = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCamelCase_ : str = False
@property
def a ( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
return 32
@property
def a ( self : List[Any] )-> Dict:
"""simple docstring"""
return 32
@property
def a ( self : str )-> int:
"""simple docstring"""
return self.time_input_dim
@property
def a ( self : Optional[Any] )-> Dict:
"""simple docstring"""
return self.time_input_dim * 4
@property
def a ( self : str )-> Any:
"""simple docstring"""
return 1_00
@property
def a ( self : Any )-> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Any = {
"""in_channels""": 9,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCAmelCase_ : List[str] = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def a ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = self.dummy_unet
UpperCAmelCase_ : Tuple = self.dummy_movq
UpperCAmelCase_ : str = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCamelCase__ , )
UpperCAmelCase_ : Any = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a ( self : Optional[Any] , a_ : int , a_ : Tuple=0 )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
# create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((2_56, 2_56) )
        # create mask
        mask = np.ones((64, 64) , dtype=np.float32 )
        mask[:32, :32] = 0
if str(UpperCamelCase__ ).startswith("""mps""" ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(UpperCamelCase__ )
else:
UpperCAmelCase_ : List[Any] = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
UpperCAmelCase_ : str = {
"""image""": init_image,
"""mask_image""": mask,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 2,
"""guidance_scale""": 4.0,
"""output_type""": """np""",
}
return inputs
def a ( self : List[Any] )-> Dict:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = """cpu"""
UpperCAmelCase_ : Optional[Any] = self.get_dummy_components()
UpperCAmelCase_ : List[str] = self.pipeline_class(**UpperCamelCase__ )
UpperCAmelCase_ : Optional[int] = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCAmelCase_ : Dict = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
UpperCAmelCase_ : Union[str, Any] = output.images
UpperCAmelCase_ : Any = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
UpperCAmelCase_ : int = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Tuple = np.array(
[0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def a ( self : int )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class KandinskyVaaInpaintPipelineIntegrationTests(unittest.TestCase ):
"""simple docstring"""
    def tearDown(self) -> None:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
UpperCAmelCase_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        mask = np.ones((7_68, 7_68) , dtype=np.float32 )
        mask[:2_50, 2_50:-2_50] = 0
UpperCAmelCase_ : int = """a hat"""
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.float16 )
pipe_prior.to(UpperCamelCase__ )
        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.float16 )
UpperCAmelCase_ : Tuple = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCAmelCase_ : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase_ ,UpperCAmelCase_ : List[str] = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
UpperCAmelCase_ : List[str] = pipeline(
image=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="""np""" , )
UpperCAmelCase_ : str = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 470 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word) -> set:
    """Return the set of adjacent symbol pairs in a word."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    '''Constructs a Speech2Text2 (BPE-based) tokenizer.'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding='utf-8' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding='utf-8' ) as merges_handle:
                merges = merges_handle.read().split('\n' )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        # Append the end-of-word marker before running the merge loop.
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Pick the highest-priority (lowest-rank) merge among the current pairs.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")
        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding." )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return (vocab_file, merges_file)
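# A minimal usage sketch for the tokenizer above. The file paths are hypothetical;
# a published checkpoint such as "facebook/s2t-wav2vec2-large-en-de" would normally
# be loaded with Speech2Text2Tokenizer.from_pretrained instead:
#
#     tokenizer = Speech2Text2Tokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#     ids = tokenizer("hallo welt")["input_ids"]   # encoding requires merges.txt
#     text = tokenizer.decode(ids)                 # decoding works without merges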
| 699 | 0 |
'''simple docstring'''
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 440 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI model."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
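# A minimal usage sketch (downloads the default facebook/bart-large-mnli checkpoint
# on first use; the call signature follows the PipelineTool API):
#
#     tool = TextClassificationTool()
#     label = tool("This movie was fantastic!", labels=["positive", "negative"])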
| 699 | 0 |
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    """Builds small ConvNext configs/inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=False, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Config/model/backbone tests for ConvNext."""

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the COCO test fixture image used by the integration test below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
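# A minimal sketch of using the backbone outside the tests (random weights; a
# pretrained checkpoint would normally be loaded with from_pretrained):
#
#     config = ConvNextConfig(out_features=["stage4"])
#     backbone = ConvNextBackbone(config)
#     feature_maps = backbone(torch.randn(1, 3, 224, 224)).feature_maps
#     print([fm.shape for fm in feature_maps])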
| 61 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to the corresponding Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
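# A minimal usage sketch (the model objects are illustrative; any matching pair of
# PyTorch/Flax diffusers models would do):
#
#     pt_state_dict = pt_model.state_dict()
#     flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
#     # flax_params is a nested dict ready to be used as the Flax model's params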
| 699 | 0 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    """Return True if non-negative n is a perfect cube."""
    # Round the float cube root before cubing: 27 ** (1 / 3) is 3.0000000000000004,
    # so a direct equality check on the float value would wrongly return False.
    val = round(n ** (1 / 3))
    return val * val * val == n


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
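# Quick demonstration of the floating-point pitfall the rounding above avoids:
#
#     >>> 27 ** (1 / 3)
#     3.0000000000000004
#     >>> round(27 ** (1 / 3)) ** 3 == 27
#     True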
| 210 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A simple vector over the reals, stored as a list of components."""

    def __init__(self, components: Collection[float] | None = None):
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):  # scalar multiplication
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):  # dot product
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of size 'dimension'
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index 'pos' (indexing at 0)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    # returns a random vector of size n with integer components between 'a' and 'b'
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A simple w x h matrix, stored as a list of row lists."""

    def __init__(self, matrix: list[list[float]], w: int, h: int):
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!" )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            # Laplace expansion along the first row
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
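# A minimal usage sketch for the classes above:
#
#     x = Vector([1, 2, 3])
#     y = Vector([1, 1, 1])
#     print(x + y)                 # (2,3,4)
#     print(x * y)                 # 6 (dot product)
#     m = Matrix([[1, 0], [0, 2]], 2, 2)
#     print(m * Vector([3, 4]))    # (3,8)
#     print(m.determinant())       # 2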
| 699 | 0 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division over 2..isqrt(number); callers only pass values >= 7."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are differences of consecutive cubes.

    (x + 1)^3 - x^3 = 3x^2 + 3x + 1, and consecutive candidates differ by
    6(x + 1), which is what the running cube_index increment below implements.
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
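# Quick check of the candidate generator: the first few differences of
# consecutive cubes are 7, 19, 37, 61, 91.
#
#     candidates = [(x + 1) ** 3 - x ** 3 for x in range(1, 6)]
#     print(candidates)  # [7, 19, 37, 61, 91]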
| 472 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    """Configuration class for BlenderbotSmall models."""

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class BlenderbotSmallOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t )
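# A minimal usage sketch (assumes an exporter-style workflow; the tokenizer
# checkpoint is the public facebook/blenderbot_small-90M):
#
#     from transformers import AutoTokenizer
#     config = BlenderbotSmallConfig()
#     onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
#     tok = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#     dummy = onnx_config.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)
#     print(sorted(dummy.keys()))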
| 699 | 0 |
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as a string of '0'/'1' characters."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """Extend the lexicon with the two children of curr_string, widening codes at powers of two."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """Compress a bit string with the LZ-style scheme used by this module."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prefix the compressed stream with a header encoding the original file length."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """Pack the bit string into bytes (with a 1-then-zeros terminator) and write it out."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    """Read, compress, and write: the end-to-end pipeline."""
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
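# A quick sketch of the pipeline from Python ("input.bin"/"output.lz" are
# hypothetical paths):
#
#     bits = read_file_binary("input.bin")      # e.g. "001010011100..."
#     payload = compress_data(bits)
#     framed = add_file_length("input.bin", payload)
#     write_file_binary("output.lz", framed)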
| 575 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    """Wraps a BridgeTower image processor and a Roberta tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
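# A minimal usage sketch (assumes a published BridgeTower checkpoint; the image
# path is hypothetical):
#
#     from PIL import Image
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     inputs = processor(images=Image.open("cat.png"), text="a cat", return_tensors="pt")
#     # inputs now holds input_ids / attention_mask plus pixel_values / pixel_mask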
| 699 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """Zero-shot text classification tool built on an NLI model."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels), [f"This example is {label}" for label in labels], return_tensors="pt", padding="max_length", )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 454 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 699 | 0 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")


def generate_key(key_size: int) -> tuple[tuple[int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program." )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")


if __name__ == "__main__":
    main()
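# A quick sanity sketch of using the generated key pair directly (textbook RSA,
# no padding; for illustration only):
#
#     public_key, private_key = generate_key(1024)
#     n, e = public_key
#     _, d = private_key
#     message = 42
#     ciphertext = pow(message, e, n)
#     assert pow(ciphertext, d, n) == message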
| 178 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    """T5-style decoder stack whose layers are conditioned with FiLM on the diffusion time step."""

    @register_to_config
    def __init__(
        self,
        input_dims: int = 128,
        targets_length: int = 256,
        max_decoder_noise_time: float = 2000.0,
        d_model: int = 768,
        num_layers: int = 12,
        num_heads: int = 12,
        d_kv: int = 64,
        d_ff: int = 2048,
        dropout_rate: float = 0.1,
    ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )

        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate) )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ) )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon) )

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype )

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
    ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(
        self,
        hidden_states,
        key_value_states=None,
        attention_mask=None,
    ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
A = NewGELUActivation()
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
        # gate path (GELU) and linear path must use the two separate input projections
        A = self.act(self.wi_0(UpperCamelCase__ ) )
        A = self.wi_1(UpperCamelCase__ )
A = hidden_gelu * hidden_linear
A = self.dropout(UpperCamelCase__ )
A = self.wo(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=1e-6 ):
super().__init__()
A = nn.Parameter(torch.ones(UpperCamelCase__ ) )
A = eps
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : int ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
        A = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=UpperCamelCase__ )
A = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
A = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
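# Sketch of the RMS normalization above (names illustrative, hidden size 4):
#
#   x = torch.randn(2, 4)
#   variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
#   y = weight * (x * torch.rsqrt(variance + eps))
#
# i.e. each feature vector is divided by its root mean square and rescaled;
# there is no mean subtraction and no bias, unlike standard LayerNorm.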
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def UpperCamelCase ( self : Any , UpperCamelCase__ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase__ , 3.0 )) ))
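# This is the tanh-based GELU approximation popularized by the GPT-2 codebase.
# Assuming a recent PyTorch (>= 1.12), it should agree closely with the
# built-in variant: torch.nn.functional.gelu(x, approximate="tanh").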
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , out_features * 2 , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
A = self.scale_bias(UpperCamelCase__ )
A , A = torch.chunk(UpperCamelCase__ , 2 , -1 )
A = x * (1 + scale) + shift
return x
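# FiLM (feature-wise linear modulation) sketch: the conditioning embedding is
# projected to per-feature (scale, shift) pairs, so for a conditioning vector c
# and features x the block computes
#
#   scale, shift = torch.chunk(scale_bias(c), 2, dim=-1)
#   x = x * (1 + scale) + shift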
| 699 | 0 |
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel
__lowerCamelCase : Dict = HfApi()
__lowerCamelCase : str = {}
# fmt: off
__lowerCamelCase : List[str] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__lowerCamelCase : Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__lowerCamelCase : Optional[Any] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__lowerCamelCase : Union[str, Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__lowerCamelCase : List[str] = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__lowerCamelCase : int = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__lowerCamelCase : Any = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__lowerCamelCase : Optional[int] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__lowerCamelCase : Optional[Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__lowerCamelCase : Any = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__lowerCamelCase : Optional[Any] = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__lowerCamelCase : List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__lowerCamelCase : str = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
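# Each tensor above holds the expected first 30 output values
# (output[0, 0, 0, :30]) for one converted checkpoint; in the original script
# they are stored in `results` under per-model keys, and the loop below
# regenerates the outputs on a fixed seed and compares them with atol=1e-3.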
__lowerCamelCase : str = api.list_models(filter="""diffusers""")
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__lowerCamelCase : int = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1]
print(f"""Started running {mod.modelId}!!!""")
if mod.modelId.startswith("""CompVis"""):
            __lowerCamelCase : Tuple = UNet2DModel.from_pretrained(local_checkpoint, subfolder="""unet""")
else:
            __lowerCamelCase : Optional[int] = UNet2DModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__lowerCamelCase : Optional[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__lowerCamelCase : int = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__lowerCamelCase : Dict = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1e-3
)
print(f"""{mod.modelId} has passed successfully!!!""")
| 501 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
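# For example, bin(0b1011)[2:] == "1011", so the comprehension above turns the
# 48-bit message into a list of individual 0/1 ints like [1, 0, 1, 1, ...].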
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
A = WATERMARK_BITS
A = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor ):
# can't encode images that are smaller than 256
if images.shape[-1] < 256:
return images
A = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        A = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        A = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
A = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
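# Usage sketch: the method above expects decoded images in [-1, 1] with shape
# (batch, 3, H, W); it round-trips through uint8 HWC numpy arrays for the
# DWT-DCT encoder and rescales the result back into [-1, 1].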
| 699 | 0 |
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict )-> Dict:
A__ = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
A__ = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
A__ = f"{src_lang}-{tgt_lang}"
A__ = f"\n---\nlanguage:\n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt16\n- allenai\nlicense: apache-2.0\ndatasets:\n- wmt16\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.\n\nFor more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).\n\nAll 3 models are available:\n\n* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)\n* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)\n* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)\n\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"allenai/{model_name}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n\n## Training data\n\nPretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).\n\n## Eval results\n\nHere are the BLEU scores:\n\nmodel | fairseq | transformers\n-------|---------|----------\n{model_name} | {scores[model_name][0]} | {scores[model_name][1]}\n\nThe score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=5\nmkdir -p $DATA_DIR\nsacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt16/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)\n\n\n### BibTeX entry and citation info\n\n```\n@misc{{kasai2020deep,\n title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},\n author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},\n year={{2020}},\n eprint={{2006.10369}},\n archivePrefix={{arXiv}},\n primaryClass={{cs.CL}}\n}}\n```\n\n"
model_card_dir.mkdir(parents=UpperCamelCase_ , exist_ok=UpperCamelCase_ )
A__ = os.path.join(UpperCamelCase_ , '''README.md''' )
print(f"Generating {path}" )
with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(UpperCamelCase_ )
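# Sketch: for model_name == "wmt16-en-de-dist-12-1" this writes
# model_cards/allenai/wmt16-en-de-dist-12-1/README.md filled from the template
# string above.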
# make sure we are under the root of the project
_lowercase = Path(__file__).resolve().parent.parent.parent
_lowercase = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
_lowercase = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 632 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : List[Any], lowerCAmelCase : str ) -> int:
for attribute in key.split('.' ):
A = getattr(lowerCAmelCase, lowerCAmelCase )
if weight_type is not None:
A = getattr(lowerCAmelCase, lowerCAmelCase ).shape
else:
A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[int] ) -> Dict:
A = []
A = fairseq_model.state_dict()
A = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, hf_model.config.feat_extract_norm == 'group', )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
A = True
if "*" in mapped_key:
A = name.split(lowerCAmelCase )[0].split('.' )[-2]
A = mapped_key.replace('*', lowerCAmelCase )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
set_recursively(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
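# Example: a fairseq key such as "encoder.layers.3.self_attn.k_proj.weight"
# matches the "self_attn.k_proj" entry of MAPPING, the "*" placeholder is
# replaced by the layer index ("3"), and weight_type is detected as "weight",
# yielding the HF key "unispeech_sat.encoder.layers.3.attention.k_proj.weight".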
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : List[Any], lowerCAmelCase : int ) -> Dict:
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
A = value
            logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Union[str, Any]=None, lowerCAmelCase : str=None, lowerCAmelCase : List[Any]=True ) -> Union[str, Any]:
if config_path is not None:
A = UniSpeechSatConfig.from_pretrained(lowerCAmelCase )
else:
A = UniSpeechSatConfig()
A = ''
if is_finetuned:
A = UniSpeechSatForCTC(lowerCAmelCase )
else:
A = UniSpeechSatForPreTraining(lowerCAmelCase )
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
A = model[0].eval()
recursively_load_weights(lowerCAmelCase, lowerCAmelCase )
hf_wavavec.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 699 | 0 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : List[str] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
__lowerCAmelCase : List[Any] = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias'))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias')
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
F'decoder.layers.{i}.encoder_attn.out_proj.weight',
)
)
rename_keys.append(
(
F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
F'decoder.layers.{i}.encoder_attn.out_proj.bias',
)
)
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight'))
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias'))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias')
)
rename_keys.append(
(F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias')
)
rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias'))
rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias'))
rename_keys.append(
(F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Union[str, Any]:
__lowercase : Optional[int] = state_dict.pop(__lowerCAmelCase )
__lowercase : Optional[int] = val
def UpperCAmelCase_ ( __lowerCAmelCase ) -> Tuple:
__lowercase : Optional[Any] = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
__lowercase : Any = key.replace('''backbone.0.body''' , '''backbone.conv_encoder.model''' )
__lowercase : int = value
else:
__lowercase : Tuple = value
return new_state_dict
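# Example: "backbone.0.body.layer1.0.conv1.weight" becomes
# "backbone.conv_encoder.model.layer1.0.conv1.weight".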
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase=False ) -> List[Any]:
__lowercase : Tuple = ''''''
if is_panoptic:
__lowercase : Tuple = '''conditional_detr.'''
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
__lowercase : Tuple = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight' )
__lowercase : Tuple = state_dict.pop(F'{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
__lowercase : Tuple = in_proj_weight[:256, :]
__lowercase : List[Any] = in_proj_bias[:256]
__lowercase : Any = in_proj_weight[256:512, :]
__lowercase : Optional[int] = in_proj_bias[256:512]
__lowercase : Dict = in_proj_weight[-256:, :]
__lowercase : int = in_proj_bias[-256:]
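# The fused in_proj matrix stacks the query, key and value projections along
# dim 0 (3 * 256 rows for hidden size 256), so rows [:256], [256:512] and
# [-256:] become q_proj, k_proj and v_proj of the matching HF attention layer.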
def UpperCAmelCase_ ( ) -> List[Any]:
__lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__lowercase : Tuple = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def UpperCAmelCase_ ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
__lowercase : Union[str, Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
__lowercase : Tuple = '''resnet101'''
if "dc5" in model_name:
__lowercase : Optional[int] = True
__lowercase : List[str] = '''panoptic''' in model_name
if is_panoptic:
__lowercase : Dict = 250
else:
__lowercase : Optional[Any] = 91
__lowercase : Optional[Any] = '''huggingface/label-files'''
__lowercase : Tuple = '''coco-detection-id2label.json'''
__lowercase : List[str] = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
    __lowercase : Dict = {int(k ): v for k, v in idalabel.items()}
__lowercase : Any = idalabel
__lowercase : str = {v: k for k, v in idalabel.items()}
# load image processor
__lowercase : List[Any] = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
__lowercase : Any = ConditionalDetrImageProcessor(format=__lowerCAmelCase )
# prepare image
__lowercase : Optional[int] = prepare_img()
__lowercase : Optional[Any] = image_processor(images=__lowerCAmelCase , return_tensors='''pt''' )
__lowercase : List[Any] = encoding['''pixel_values''']
logger.info(F'Converting model {model_name}...' )
# load original model from torch hub
__lowercase : Any = torch.hub.load('''DeppMeng/ConditionalDETR''' , __lowerCAmelCase , pretrained=__lowerCAmelCase ).eval()
__lowercase : Dict = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
__lowercase : Dict = '''conditional_detr.''' + src
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
__lowercase : str = rename_backbone_keys(__lowerCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(__lowerCAmelCase , is_panoptic=__lowerCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
__lowercase : Any = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
__lowercase : Tuple = state_dict.pop(__lowerCAmelCase )
__lowercase : Union[str, Any] = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
__lowercase : Any = state_dict.pop(__lowerCAmelCase )
__lowercase : Dict = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
__lowercase : Dict = state_dict.pop(__lowerCAmelCase )
__lowercase : Union[str, Any] = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
__lowercase : List[str] = state_dict.pop(__lowerCAmelCase )
__lowercase : int = val
# finally, create HuggingFace model and load state dict
__lowercase : Tuple = ConditionalDetrForSegmentation(__lowerCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
model.push_to_hub(repo_id=__lowerCAmelCase , organization='''DepuMeng''' , commit_message='''Add model''' )
# verify our conversion
__lowercase : Optional[Any] = conditional_detr(__lowerCAmelCase )
__lowercase : Any = model(__lowerCAmelCase )
assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1E-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1E-4 )
# Save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
image_processor.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
__lowerCAmelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__lowerCAmelCase : Tuple = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 509 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("T")
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : T ):
A = data
A = None
def __str__( self : Optional[int] ):
return f'''{self.data}'''
class _UpperCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self : Tuple ):
A = None
def __iter__( self : int ):
A = self.top
while node:
yield node.data
A = node.next
def __str__( self : Any ):
return "->".join([str(UpperCamelCase__ ) for item in self] )
def __len__( self : Dict ):
return len(tuple(iter(self ) ) )
def UpperCamelCase ( self : List[str] ):
return self.top is None
def UpperCamelCase ( self : Dict , UpperCamelCase__ : T ):
A = Node(UpperCamelCase__ )
if not self.is_empty():
A = self.top
A = node
def UpperCamelCase ( self : Dict ):
if self.is_empty():
raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
A = self.top
A = self.top.next
return pop_node.data
def UpperCamelCase ( self : List[str] ):
if self.is_empty():
raise IndexError('peek from empty stack' )
assert self.top is not None
return self.top.data
def UpperCamelCase ( self : List[str] ):
A = None
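# Usage sketch (original, de-obfuscated method names): a stack is LIFO, so
#
#   stack.push(1); stack.push(2)
#   stack.pop()   # -> 2
#   stack.peek()  # -> 1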
if __name__ == "__main__":
from doctest import testmod
testmod()
| 699 | 0 |
"""simple docstring"""
def A_ ( lowercase ) -> list:
"""simple docstring"""
if bit_count < 0:
raise ValueError("""The given input must be positive""" )
# get the generated string sequence
UpperCAmelCase_ : Union[str, Any] = gray_code_sequence_string(lowercase )
    # convert the generated bit strings to integers
for i in range(len(lowercase ) ):
UpperCAmelCase_ : str = int(sequence[i] , 2 )
return sequence
def A_ ( lowercase ) -> list:
"""simple docstring"""
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
UpperCAmelCase_ : Union[str, Any] = 1 << bit_count # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
UpperCAmelCase_ : Any = gray_code_sequence_string(bit_count - 1 )
UpperCAmelCase_ : int = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
UpperCAmelCase_ : Union[str, Any] = """0""" + smaller_sequence[i]
sequence.append(lowercase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
UpperCAmelCase_ : List[Any] = """1""" + smaller_sequence[i]
sequence.append(lowercase )
return sequence
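# The construction above is the classic "reflect and prefix" recursion: prefix
# the (n - 1)-bit sequence with "0", then append its reversal prefixed with
# "1", so consecutive codes (including the wrap-around) differ in exactly one
# bit, e.g. ["00", "01", "11", "10"] for two bits.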
if __name__ == "__main__":
import doctest
doctest.testmod()
| 470 |
from __future__ import annotations
import math
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : bool, lowerCAmelCase : list[int], lowerCAmelCase : float ) -> int:
if depth < 0:
raise ValueError('Depth cannot be less than 0' )
if not scores:
raise ValueError('Scores cannot be empty' )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1, node_index * 2, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), minimax(depth + 1, node_index * 2 + 1, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), )
if is_max
else min(
minimax(depth + 1, node_index * 2, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), minimax(depth + 1, node_index * 2 + 1, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase ), )
)
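# Worked example, assuming the root call is the maximizing player: for the
# 8 leaf scores used in main() below, the tree height is log2(8) = 3 and the
# optimal value is max(min(90, 33), min(65, 34423)) = 65.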
def __UpperCamelCase () -> None:
A = [90, 23, 6, 33, 21, 65, 123, 34_423]
A = math.log(len(lowerCAmelCase ), 2 )
print(f'''Optimal value : {minimax(0, 0, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 699 | 0 |
'''simple docstring'''
import operator as op
__lowerCAmelCase : List[str] ="scaler.pt"
__lowerCAmelCase : int ="pytorch_model"
__lowerCAmelCase : Tuple ="random_states"
__lowerCAmelCase : Dict ="optimizer"
__lowerCAmelCase : Tuple ="scheduler"
__lowerCAmelCase : Dict ="pytorch_model.bin"
__lowerCAmelCase : Union[str, Any] ="pytorch_model.bin.index.json"
__lowerCAmelCase : int ="model.safetensors"
__lowerCAmelCase : Any ="model.safetensors.index.json"
__lowerCAmelCase : List[str] ="1.10.2"
__lowerCAmelCase : Dict ="py38"
__lowerCAmelCase : str ="4.17.0"
__lowerCAmelCase : List[Any] =["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
__lowerCAmelCase : int =["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
__lowerCAmelCase : Tuple =["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
__lowerCAmelCase : Optional[int] =["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
__lowerCAmelCase : List[str] =["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
__lowerCAmelCase : int ="2.0.1"
__lowerCAmelCase : Any =["pdsh", "standard", "openmpi", "mvapich"]
__lowerCAmelCase : List[Any] =["default", "reduce-overhead", "max-autotune"]
__lowerCAmelCase : Optional[int] ={">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__lowerCAmelCase : Tuple =[
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
__lowerCAmelCase : Dict =["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
__lowerCAmelCase : Dict =["DEEPSPEED", "MULTI_XPU", "FSDP"]
| 440 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : PreTrainedTokenizer, lowerCAmelCase : int, lowerCAmelCase : Optional[int] = None, ) -> Dict:
A = {}
if train_file is not None:
A = [train_file]
if eval_file is not None:
A = [eval_file]
if test_file is not None:
A = [test_file]
A = datasets.load_dataset('csv', data_files=lowerCAmelCase )
A = list(ds[list(files.keys() )[0]].features.keys() )
A = features_name.pop(lowerCAmelCase )
A = list(set(ds[list(files.keys() )[0]][label_name] ) )
A = {label: i for i, label in enumerate(lowerCAmelCase )}
A = tokenizer.model_input_names
A = {}
if len(lowerCAmelCase ) == 1:
for k in files.keys():
A = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]], truncation=lowerCAmelCase, max_length=lowerCAmelCase, padding='max_length' ), batched=lowerCAmelCase, )
elif len(lowerCAmelCase ) == 2:
for k in files.keys():
A = ds[k].map(
lambda lowerCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]), truncation=lowerCAmelCase, max_length=lowerCAmelCase, padding='max_length', ), batched=lowerCAmelCase, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
A = (
tf.data.Dataset.from_generator(
            lowerCAmelCase, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A = (
tf.data.Dataset.from_generator(
            lowerCAmelCase, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A = (
tf.data.Dataset.from_generator(
            lowerCAmelCase, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
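# Sketch: each split is exposed as a tf.data.Dataset yielding
# ({model_input_name: token_ids}, label_id) pairs; assert_cardinality lets
# TensorFlow report an exact length even though the examples come from Python
# generators.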
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = field(metadata={'''help''': '''Which column contains the label'''} )
SCREAMING_SNAKE_CASE : str = field(default=__lowercase , metadata={'''help''': '''The path of the training file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the development file'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(default=__lowercase , metadata={'''help''': '''The path of the test file'''} )
SCREAMING_SNAKE_CASE : int = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
SCREAMING_SNAKE_CASE : bool = field(
default=__lowercase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class _UpperCAmelCase :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
SCREAMING_SNAKE_CASE : bool = field(default=__lowercase , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__lowercase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def __UpperCamelCase () -> Any:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
A , A , A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
        f'''16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
A , A , A , A = get_tfds(
train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=lowerCAmelCase, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(lowerCAmelCase ), labelaid=lowerCAmelCase, idalabel={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
with training_args.strategy.scope():
A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=lowerCAmelCase, cache_dir=model_args.cache_dir, )
def compute_metrics(lowerCAmelCase : EvalPrediction ) -> Dict:
A = np.argmax(p.predictions, axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A = TFTrainer(
model=lowerCAmelCase, args=lowerCAmelCase, train_dataset=lowerCAmelCase, eval_dataset=lowerCAmelCase, compute_metrics=lowerCAmelCase, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A = {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
A = trainer.evaluate()
A = os.path.join(training_args.output_dir, 'eval_results.txt' )
with open(lowerCAmelCase, 'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
results.update(lowerCAmelCase )
return results
if __name__ == "__main__":
main()
| 699 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def _A ( lowerCAmelCase_ : List[str]=None ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser(add_help=lowerCAmelCase_ , allow_abbrev=lowerCAmelCase_ )
# The main config parser
lowerCAmelCase__ = config_command_parser(lowerCAmelCase_ )
# The subparser to add commands to
lowerCAmelCase__ = config_parser.add_subparsers(title="subcommands" , dest="subcommand" )
# Then add other parsers with the parent parser
default_command_parser(lowerCAmelCase_ , parents=[parent_parser] )
update_command_parser(lowerCAmelCase_ , parents=[parent_parser] )
return config_parser
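# Example (flags illustrative): `accelerate config default` or
# `accelerate config update --config_file path.yaml` dispatches to the matching
# handler through the `func` attribute each registered sub-parser sets.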
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = get_config_parser()
lowerCAmelCase__ = config_parser.parse_args()
if not hasattr(lowerCAmelCase_ , "func" ):
config_parser.print_help()
exit(1 )
# Run
args.func(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 61 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
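# Lazy-import pattern: in the original module each availability block extends
# an `_import_structure` dict, and _LazyModule defers the heavy torch/TF/Flax
# imports until one of those attributes is first accessed, keeping plain
# `import transformers` fast.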
| 699 | 0 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def lowerCamelCase__ ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
requests.request('''GET''' , '''https://huggingface.co''' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def lowerCamelCase__ ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('''GET''' , '''https://huggingface.co''' )
def lowerCamelCase__ ( ):
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
http_head('''https://huggingface.co''' )
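# Each OfflineSimulationMode patches network access differently: timing out the
# socket, failing the connection outright, or setting HF_DATASETS_OFFLINE=1 so
# the library refuses before any request is attempted.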
| 210 |
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> int:
    # Exponentiation by squaring: computes a ** b for b >= 0 in O(log b) multiplications.
    if b == 0:
        return 1
    A = actual_power(lowerCAmelCase, int(b / 2 ) )
    if (b % 2) == 0:
        return half * half
    return a * half * half
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> float:
    if b < 0:
        # for negative exponents, a ** b == 1 / a ** (-b)
        return 1 / actual_power(lowerCAmelCase, -b )
    return actual_power(lowerCAmelCase, lowerCAmelCase )
if __name__ == "__main__":
print(power(-2, -3))
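    # prints -0.125, i.e. (-2) ** -3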
| 699 | 0 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def test_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE__ = {'input_ids': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        # `SCREAMING_SNAKE_CASE__` is the fmt: off encoding literal assigned above.
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE__, model_name="google/bigbird-pegasus-large-arxiv", revision="ba85d0851d708441f91440d509690f1ab6353415"
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 472 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
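

# Quick illustrative check (our own example values, not from the source file):
# in [2, 3, -2, 4] the best contiguous product is 2 * 3 = 6.
if __name__ == "__main__":
    print(max_product_subarray([2, 3, -2, 4]))  # 6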
| 699 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 575 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 | 0 |
import datasets
_CITATION = '''\\n@InProceedings{conneau2018xnli,\n author = \"Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin\",\n title = \"XNLI: Evaluating Cross-lingual Sentence Representations\",\n booktitle = \"Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n location = \"Brussels, Belgium\",\n}\n'''
_DESCRIPTION = '''\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'''
_KWARGS_DESCRIPTION = '''\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric(\"xnli\")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'''
def simple_accuracy(preds, labels):
    """Elementwise accuracy for numpy arrays of predictions and labels."""
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 454 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BioGPT model."""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
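

# Minimal usage sketch (ours, not part of the original module): the defaults
# above mirror microsoft/biogpt, so a no-argument config describes that
# architecture.
if __name__ == "__main__":
    config = BioGptConfig()
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # biogpt 1024 24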
| 699 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are ordered from the constant term up to the highest degree."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        """Add two polynomials, padding the shorter coefficient list."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0.0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        """Evaluate the polynomial at x = substitution."""
        result: float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Return the first derivative of the polynomial."""
        coefficients = [0.0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        """Return the antiderivative with the given integration constant."""
        coefficients = [0.0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False

        if self.degree != polynomial_a.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
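

# Small worked example (ours, not from the source file) showing the API above:
if __name__ == "__main__":
    p = Polynomial(1, [1, 2])  # represents 2x + 1
    print(p)                   # 2x + 1
    print(p.derivative())      # 2
    print(p.evaluate(3))       # 7
    print(p.integral())        # 1.0x^2 + 1.0x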
| 178 |
import sys
def matrix_chain_order(array):
    """Dynamic-programming solution to the matrix-chain multiplication problem."""
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the optimal parenthesization recorded in `optimal_solution`."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
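    # Expected printout for the sample dimensions above (the classic CLRS
    # matrix-chain example): "No. of Operation required: 15125" followed by
    # the parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).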
| 699 | 0 |
'''simple docstring'''
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below max_number (sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """
    Project Euler 187: count composites below max_number with exactly two
    (not necessarily distinct) prime factors, via a two-pointer scan over
    the sorted primes.
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1

    return semiprimes_count
if __name__ == "__main__":
print(f"""{solution() = }""")
| 501 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to and including isqrt(number)."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """
    Count primes below max_prime that are a difference of consecutive cubes,
    (n + 1)**3 - n**3 = 3*n**2 + 3*n + 1 (Project Euler 131).
    """
    primes_count = 0
    cube_index = 1
    prime_candidate = 7  # 3*1**2 + 3*1 + 1
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index  # advance to the next cube difference

    return primes_count
if __name__ == "__main__":
print(F'''{solution() = }''')
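    # Expected result for the default limit (Project Euler 131): 173.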
| 699 | 0 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename the remaining basic Flax keys and reshape tensors for PyTorch."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # keys were joined with "/" while reading the checkpoint; PyTorch
        # state dicts use "." separators
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
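

# Example invocation (assumes this script is saved as convert_big_switch.py;
# the flag names come from the argparse block above):
#
#   python convert_big_switch.py \
#       --switch_t5x_checkpoint_path /path/to/checkpoint_634600 \
#       --pytorch_dump_folder_path /path/to/output \
#       --max_shard_size 10GB \
#       --dtype bfloat16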
| 632 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 699 | 0 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs
tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 509 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "geglu" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : str = "layer_norm" , UpperCamelCase__ : bool = False , ):
super().__init__()
A = only_cross_attention
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm_zero'
A = (num_embeds_ada_norm is not None) and norm_type == 'ada_norm'
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
A = AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A = AdaLayerNormZero(UpperCamelCase__ , UpperCamelCase__ )
else:
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = Attention(
query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=UpperCamelCase__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
A = (
AdaLayerNorm(UpperCamelCase__ , UpperCamelCase__ )
if self.use_ada_layer_norm
else nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
)
A = Attention(
query_dim=UpperCamelCase__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , dropout=UpperCamelCase__ , bias=UpperCamelCase__ , upcast_attention=UpperCamelCase__ , ) # is self-attn if encoder_hidden_states is none
else:
A = None
A = None
# 3. Feed-forward
A = nn.LayerNorm(UpperCamelCase__ , elementwise_affine=UpperCamelCase__ )
A = FeedForward(UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn=UpperCamelCase__ , final_dropout=UpperCamelCase__ )
# let chunk size default to None
A = None
A = 0
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ):
# Sets chunk feed-forward
A = chunk_size
A = dim
def UpperCamelCase ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Dict[str, Any] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , ):
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
A = self.norma(UpperCamelCase__ , UpperCamelCase__ )
elif self.use_ada_layer_norm_zero:
A , A , A , A , A = self.norma(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hidden_dtype=hidden_states.dtype )
else:
A = self.norma(UpperCamelCase__ )
A = cross_attention_kwargs if cross_attention_kwargs is not None else {}
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
if self.use_ada_layer_norm_zero:
A = gate_msa.unsqueeze(1 ) * attn_output
A = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
A = (
self.norma(UpperCamelCase__ , UpperCamelCase__ ) if self.use_ada_layer_norm else self.norma(UpperCamelCase__ )
)
A = self.attna(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , **UpperCamelCase__ , )
A = attn_output + hidden_states
# 3. Feed-forward
A = self.norma(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' )
A = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
A = torch.cat(
[self.ff(UpperCamelCase__ ) for hid_slice in norm_hidden_states.chunk(UpperCamelCase__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
A = self.ff(UpperCamelCase__ )
if self.use_ada_layer_norm_zero:
A = gate_mlp.unsqueeze(1 ) * ff_output
A = ff_output + hidden_states
return hidden_states
class FeedForward(nn.Module):
    r"""A feed-forward layer."""

    def __init__(self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation function with an optional tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A variant of the gated linear unit activation function."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""The approximate form of the Gaussian Error Linear Unit (GELU)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero)."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
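

# Minimal smoke test (illustrative, ours). Kept as a comment because this
# module only runs inside its package (note the relative imports above):
#
#   block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
#   out = block(torch.randn(2, 77, 64))  # -> torch.Size([2, 77, 64])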
| 699 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 470 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_UpperCAmelCase = "</w>"
_UpperCAmelCase = "@@ "
def get_pairs(word):
    """
    Return the set of adjacent symbol pairs in a word, where the word is a
    tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
_UpperCAmelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Constructs a Speech2Text2 tokenizer (BPE-based)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>", do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
| 699 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class UpperCAmelCase ( unittest.TestCase ):
def __init__( self :List[str] , lowercase_ :List[str] , lowercase_ :str=7 , lowercase_ :Tuple=3 , lowercase_ :Tuple=30 , lowercase_ :List[str]=4_00 , lowercase_ :int=True , lowercase_ :Any=None , lowercase_ :List[str]=True , lowercase_ :Optional[int]=[0.5, 0.5, 0.5] , lowercase_ :Any=[0.5, 0.5, 0.5] , lowercase_ :Any=True , lowercase_ :List[str]=1 / 2_55 , lowercase_ :Union[str, Any]=True , )-> int:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
A__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = do_normalize
A__ = image_mean
A__ = image_std
A__ = do_rescale
A__ = rescale_factor
A__ = do_pad
def UpperCAmelCase_ ( self :Union[str, Any] )-> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :Dict , lowercase_ :Optional[Any]=False )-> List[Any]:
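        # compute the (height, width) an image gets after shortest-edge resizing; for batched inputs, take the per-dimension max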
if not batched:
A__ = image_inputs[0]
if isinstance(UpperCamelCase__ , Image.Image ):
A__, A__ = image.size
else:
A__, A__ = image.shape[1], image.shape[2]
if w < h:
A__ = int(self.size["shortest_edge"] * h / w )
A__ = self.size["shortest_edge"]
elif w > h:
A__ = self.size["shortest_edge"]
A__ = int(self.size["shortest_edge"] * w / h )
else:
A__ = self.size["shortest_edge"]
A__ = self.size["shortest_edge"]
else:
A__ = []
for image in image_inputs:
A__, A__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            A__ = max(UpperCamelCase__ , key=lambda item : item[0] )[0]
            A__ = max(UpperCamelCase__ , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class UpperCAmelCase ( __lowercase , unittest.TestCase ):
__lowercase = ConditionalDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self :int )-> Optional[Any]:
A__ = ConditionalDetrImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self :str )-> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self :Any )-> Optional[Any]:
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )
def UpperCAmelCase_ ( self :Optional[int] )-> List[Any]:
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , UpperCamelCase__ )
A__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , UpperCamelCase__ )
def UpperCAmelCase_ ( self :Tuple )-> Optional[Any]:
pass
def UpperCAmelCase_ ( self :List[str] )-> Any:
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__, A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__, A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
A__ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self :Optional[int] )-> int:
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__, A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
A__, A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCAmelCase_ ( self :Optional[Any] )-> Dict:
# Initialize image_processing
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
A__, A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
A__ = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
A__, A__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCAmelCase_ ( self :List[str] )-> int:
# prepare image and target
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
A__ = json.loads(f.read() )
A__ = {"image_id": 3_97_69, "annotations": target}
# encode them
A__ = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
A__ = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , return_tensors="pt" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase__ )
A__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase__ , atol=1E-4 ) )
# verify area
A__ = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase__ ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase__ )
A__ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase__ , atol=1E-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase__ ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase__ ) )
# verify class_labels
A__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase__ ) )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase__ ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase__ ) )
@slow
def UpperCAmelCase_ ( self :int )-> int:
# prepare image, target and masks_path
A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
A__ = json.loads(f.read() )
A__ = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
A__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
A__ = ConditionalDetrImageProcessor(format="coco_panoptic" )
A__ = image_processing(images=UpperCamelCase__ , annotations=UpperCamelCase__ , masks_path=UpperCamelCase__ , return_tensors="pt" )
# verify pixel values
A__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , UpperCamelCase__ )
A__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , UpperCamelCase__ , atol=1E-4 ) )
# verify area
A__ = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , UpperCamelCase__ ) )
# verify boxes
A__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , UpperCamelCase__ )
A__ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , UpperCamelCase__ , atol=1E-3 ) )
# verify image_id
A__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , UpperCamelCase__ ) )
# verify is_crowd
A__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , UpperCamelCase__ ) )
# verify class_labels
A__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , UpperCamelCase__ ) )
# verify masks
A__ = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , UpperCamelCase__ )
# verify orig_size
A__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , UpperCamelCase__ ) )
# verify size
A__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , UpperCamelCase__ ) )
| 440 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = '''facebook/bart-large-mnli'''
SCREAMING_SNAKE_CASE : Union[str, Any] = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
SCREAMING_SNAKE_CASE : Any = '''text_classifier'''
SCREAMING_SNAKE_CASE : Any = AutoTokenizer
SCREAMING_SNAKE_CASE : Dict = AutoModelForSequenceClassification
SCREAMING_SNAKE_CASE : List[Any] = ['''text''', ['''text''']]
SCREAMING_SNAKE_CASE : Dict = ['''text''']
def UpperCamelCase ( self : List[str] ):
super().setup()
A = self.model.config
A = -1
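        # scan the model's id2label mapping for the class whose name starts with "entail"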
for idx, label in config.idalabel.items():
if label.lower().startswith('entail' ):
A = int(UpperCamelCase__ )
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.' )
def UpperCamelCase ( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
A = labels
return self.pre_processor(
[text] * len(UpperCamelCase__ ) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def UpperCamelCase ( self : int , UpperCamelCase__ : List[str] ):
A = outputs.logits
A = torch.argmax(logits[:, 2] ).item()
return self._labels[label_id]
| 699 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
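# base import structure; tokenizer and model entries are only added below when their backend is available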
UpperCamelCase = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 61 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
def __UpperCamelCase (lowerCAmelCase : List[str] ) -> Dict:
A = r'\w+[.]\d+'
A = re.findall(lowerCAmelCase, lowerCAmelCase )
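    # rewrite "<name>.<digit>" segments (e.g. "layers.0") as "<name>_<digit>" to match Flax module naming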
for pat in pats:
A = key.replace(lowerCAmelCase, '_'.join(pat.split('.' ) ) )
return key
def __UpperCamelCase (lowerCAmelCase : Optional[int], lowerCAmelCase : Dict, lowerCAmelCase : Dict ) -> Any:
A = pt_tuple_key[:-1] + ('scale',)
if (
any('norm' in str_ for str_ in pt_tuple_key )
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A = pt_tuple_key[:-1] + ('scale',)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A = pt_tuple_key[:-1] + ('embedding',)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A = pt_tensor.transpose(2, 3, 1, 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight":
A = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def __UpperCamelCase (lowerCAmelCase : Tuple, lowerCAmelCase : Any, lowerCAmelCase : str=42 ) -> Any:
# Step 1: Convert pytorch tensor to numpy
A = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A = flax_model.init_weights(PRNGKey(lowerCAmelCase ) )
A = flatten_dict(lowerCAmelCase )
A = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A = rename_key(lowerCAmelCase )
A = tuple(renamed_pt_key.split('.' ) )
# Correctly rename weight parameters
A , A = rename_key_and_reshape_tensor(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# also add unexpected weight so that warning is thrown
A = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
| 699 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCamelCase__:
__magic_name__ : int = XGLMConfig
__magic_name__ : Any = {}
__magic_name__ : Any = '''gelu'''
def __init__( self : List[str] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str]=14 , lowerCAmelCase : List[str]=7 , lowerCAmelCase : Dict=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Optional[int]=True , lowerCAmelCase : Optional[int]=99 , lowerCAmelCase : List[Any]=32 , lowerCAmelCase : Optional[int]=2 , lowerCAmelCase : List[str]=4 , lowerCAmelCase : Optional[Any]=37 , lowerCAmelCase : List[str]="gelu" , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : List[str]=512 , lowerCAmelCase : Any=0.02 , )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = d_model
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = ffn_dim
UpperCAmelCase = activation_function
UpperCAmelCase = activation_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = initializer_range
UpperCAmelCase = None
UpperCAmelCase = 0
UpperCAmelCase = 2
UpperCAmelCase = 1
def a__( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
return XGLMConfig.from_pretrained('''facebook/xglm-564M''' )
def a__( self : List[str] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = self.get_config()
UpperCAmelCase = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def a__( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=UpperCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=UpperCamelCase__ , )
def a__( self : Any )-> Dict:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {
'''input_ids''': input_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCamelCase__( __lowercase , __lowercase , unittest.TestCase ):
__magic_name__ : int = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__magic_name__ : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
__magic_name__ : List[Any] = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
__magic_name__ : Union[str, Any] = False
__magic_name__ : Union[str, Any] = False
__magic_name__ : List[str] = False
def a__( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = TFXGLMModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=UpperCamelCase__ , n_embd=37 )
def a__( self : str )-> int:
"""simple docstring"""
self.config_tester.run_common_tests()
@slow
def a__( self : int )-> List[Any]:
"""simple docstring"""
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFXGLMModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''' )
def a__( self : int )-> Any:
"""simple docstring"""
super().test_resize_token_embeddings()
@require_tf
class UpperCamelCase__( unittest.TestCase ):
@slow
def a__( self : List[str] , lowerCAmelCase : Dict=True )-> Dict:
"""simple docstring"""
UpperCAmelCase = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCAmelCase = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
UpperCAmelCase = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
UpperCAmelCase = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase__ )
@slow
def a__( self : int )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCAmelCase = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
tf.random.set_seed(0 )
UpperCAmelCase = tokenizer('''Today is a nice day and''' , return_tensors='''tf''' )
UpperCAmelCase = tokenized.input_ids
        # force generation to happen on CPU, to avoid GPU-related quirks and ensure the same output regardless of the available devices
with tf.device(''':/CPU:0''' ):
UpperCAmelCase = model.generate(UpperCamelCase__ , do_sample=UpperCamelCase__ , seed=[7, 0] )
UpperCAmelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase__ )
UpperCAmelCase = (
'''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
)
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
@slow
def a__( self : Tuple )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''' )
UpperCAmelCase = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
UpperCAmelCase = '''left'''
# use different length sentences to test batching
UpperCAmelCase = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When''',
'''Hello, my dog is a little''',
]
UpperCAmelCase = tokenizer(UpperCamelCase__ , return_tensors='''tf''' , padding=UpperCamelCase__ )
UpperCAmelCase = inputs['''input_ids''']
UpperCAmelCase = model.generate(input_ids=UpperCamelCase__ , attention_mask=inputs['''attention_mask'''] , max_new_tokens=12 )
UpperCAmelCase = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
UpperCAmelCase = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=12 )
UpperCAmelCase = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
UpperCAmelCase = model.generate(input_ids=UpperCamelCase__ , max_new_tokens=12 )
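        # decode the padded batch and the two unpadded single-sentence generations for comparison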
UpperCAmelCase = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase__ )
UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase__ )
UpperCAmelCase = [
'''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
'''left-padding, such as in batched generation. The output for the sequence below should be the same '''
'''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
'''a single''',
'''Hello, my dog is a little bit of a shy one, but he is very friendly''',
]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , [non_padded_sentence, padded_sentence] )
| 210 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , UpperCamelCase__ : Collection[float] | None = None ):
if components is None:
A = []
A = list(UpperCamelCase__ )
def __len__( self : List[Any] ):
return len(self.__components )
def __str__( self : str ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
def __add__( self : str , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self : Dict , UpperCamelCase__ : Vector ):
A = len(self )
if size == len(UpperCamelCase__ ):
A = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return Vector(UpperCamelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : Tuple , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Dict , UpperCamelCase__ : Vector ):
...
def __mul__( self : Union[str, Any] , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , (float, int) ):
A = [c * other for c in self.__components]
return Vector(UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ):
A = len(self )
A = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )]
return sum(UpperCamelCase__ )
else: # error case
raise Exception('invalid operand!' )
def UpperCamelCase ( self : Union[str, Any] ):
return Vector(self.__components )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def UpperCamelCase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : float ):
assert -len(self.__components ) <= pos < len(self.__components )
A = value
def UpperCamelCase ( self : str ):
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
A = [c**2 for c in self.__components]
return math.sqrt(sum(UpperCamelCase__ ) )
def UpperCamelCase ( self : Any , UpperCamelCase__ : Vector , UpperCamelCase__ : bool = False ):
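        # angle between the two vectors via cos(angle) = (self * other) / (|self| * |other|)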
A = self * other
A = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __UpperCamelCase (lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase )
return Vector([0] * dimension )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
assert isinstance(lowerCAmelCase, lowerCAmelCase ) and (isinstance(lowerCAmelCase, lowerCAmelCase ))
A = [0] * dimension
A = 1
return Vector(lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : float, lowerCAmelCase : Vector, lowerCAmelCase : Vector ) -> Vector:
assert (
isinstance(lowerCAmelCase, lowerCAmelCase )
and isinstance(lowerCAmelCase, lowerCAmelCase )
and (isinstance(lowerCAmelCase, (int, float) ))
)
return x * scalar + y
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Vector:
random.seed(lowerCAmelCase )
A = [random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )]
return Vector(lowerCAmelCase )
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : list[list[float]] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
A = matrix
A = w
A = h
def __str__( self : int ):
A = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Optional[Any] , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self : Dict , UpperCamelCase__ : Matrix ):
if self.__width == other.width() and self.__height == other.height():
A = []
for i in range(self.__height ):
A = [
self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ )
for j in range(self.__width )
]
matrix.append(UpperCamelCase__ )
return Matrix(UpperCamelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : int , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Union[str, Any] , UpperCamelCase__ : Vector ):
...
def __mul__( self : Tuple , UpperCamelCase__ : float | Vector ):
if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector
if len(UpperCamelCase__ ) == self.__width:
A = zero_vector(self.__height )
for i in range(self.__height ):
A = [
self.__matrix[i][j] * other.component(UpperCamelCase__ )
for j in range(self.__width )
]
ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar
A = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(UpperCamelCase__ , self.__width , self.__height )
return None
def UpperCamelCase ( self : Optional[int] ):
return self.__height
def UpperCamelCase ( self : List[Any] ):
return self.__width
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
A = value
else:
raise Exception('change_component: indices out of bounds' )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
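        # build the minor by removing row x, then column y from each remaining row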
A = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(UpperCamelCase__ ) ):
A = minor[i][:y] + minor[i][y + 1 :]
return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def UpperCamelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ )
else:
raise Exception('Indices out of bounds' )
def UpperCamelCase ( self : Tuple ):
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
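            # Laplace expansion along the first row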
A = [
self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width )
]
return sum(UpperCamelCase__ )
def __UpperCamelCase (lowerCAmelCase : int ) -> Matrix:
A = [[0] * n for _ in range(lowerCAmelCase )]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : int ) -> Matrix:
random.seed(lowerCAmelCase )
A = [
[random.randint(lowerCAmelCase, lowerCAmelCase ) for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )
]
return Matrix(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
| 699 | 0 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__snake_case = datasets.utils.logging.get_logger(__name__)
class lowercase__ ( folder_based_builder.FolderBasedBuilderConfig ):
A__ : bool =None
A__ : bool =None
class lowercase__ ( folder_based_builder.FolderBasedBuilder ):
A__ : int =datasets.Audio()
A__ : Union[str, Any] ='''audio'''
A__ : List[Any] =AudioFolderConfig
A__ : List[str] # definition at the bottom of the script
A__ : Any =AudioClassification(audio_column="""audio""" , label_column="""label""" )
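# file suffixes recognized as audio when scanning a dataset folder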
__snake_case = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
__snake_case = AUDIO_EXTENSIONS
| 472 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = '''blenderbot-small'''
SCREAMING_SNAKE_CASE : Any = ['''past_key_values''']
SCREAMING_SNAKE_CASE : List[str] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any]=50265 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : int=8 , UpperCamelCase__ : Optional[int]=2048 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=512 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Dict=2 , **UpperCamelCase__ : List[str] , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
            # If both encoder and decoder layer counts are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
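            # layers shared by encoder and decoder cache four tensors (self- and cross-attention); the remaining layers cache two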
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
| 699 | 0 |
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def snake_case__ ( default=None, _lowerCamelCase=None ) ->Any:
"""simple docstring"""
return field(default_factory=lambda: default, metadata=_lowerCamelCase )
@dataclass
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : str = field(
metadata={"help": "The csv file to plot."} , )
__UpperCAmelCase : bool = field(
default=__lowercase , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
__UpperCAmelCase : bool = field(
default=__lowercase , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
__UpperCAmelCase : bool = field(
default=__lowercase , metadata={"help": "Disable logarithmic scale when plotting"} , )
__UpperCAmelCase : bool = field(
default=__lowercase , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
__UpperCAmelCase : Optional[str] = field(
default=__lowercase , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
__UpperCAmelCase : Optional[List[str]] = list_field(
default=__lowercase , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def snake_case__ ( _lowerCamelCase ) ->List[Any]:
"""simple docstring"""
try:
int(_lowerCamelCase )
return True
except ValueError:
return False
def snake_case__ ( _lowerCamelCase ) ->Any:
"""simple docstring"""
try:
float(_lowerCamelCase )
return True
except ValueError:
return False
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Any , lowercase__ : Tuple ):
__lowercase : str = args
__lowercase : Optional[Any] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="" ) as csv_file:
__lowercase : Optional[Any] = csv.DictReader(UpperCamelCase__ )
for row in reader:
__lowercase : str = row["model"]
self.result_dict[model_name]["bsz"].append(int(row["batch_size"] ) )
self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"] ) )
if can_convert_to_int(row["result"] ):
# value is not None
__lowercase : str = int(row["result"] )
elif can_convert_to_float(row["result"] ):
# value is not None
__lowercase : str = float(row["result"] )
def snake_case ( self : Optional[Any] ):
        __lowercase , __lowercase = plt.subplots()
__lowercase : List[str] = "Time usage" if self.args.is_time else "Memory usage"
__lowercase : Any = title_str + " for training" if self.args.is_train else title_str + " for inference"
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("log" )
ax.set_yscale("log" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__lowercase : int = sorted(set(self.result_dict[model_name]["bsz"] ) )
__lowercase : Any = sorted(set(self.result_dict[model_name]["seq_len"] ) )
__lowercase : Optional[int] = self.result_dict[model_name]["result"]
            (__lowercase) , (__lowercase) = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__lowercase : Tuple = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
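            # one plotted series per fixed value of the dimension not on the x-axis (batch size or sequence length)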
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__lowercase : Optional[Any] = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=UpperCamelCase__ , )
else:
__lowercase : str = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
                (__lowercase) , (__lowercase) = (
("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
)
__lowercase : Union[str, Any] = np.asarray(UpperCamelCase__ , UpperCamelCase__ )[: len(UpperCamelCase__ )]
plt.scatter(
UpperCamelCase__ , UpperCamelCase__ , label=f'{label_model_name} - {inner_loop_label}: {inner_loop_value}' )
plt.plot(UpperCamelCase__ , UpperCamelCase__ , "--" )
title_str += f' {label_model_name} vs.'
__lowercase : List[str] = title_str[:-4]
__lowercase : int = "Time in s" if self.args.is_time else "Memory in MB"
# plot
plt.title(UpperCamelCase__ )
plt.xlabel(UpperCamelCase__ )
plt.ylabel(UpperCamelCase__ )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def snake_case__ ( ) ->str:
"""simple docstring"""
__lowercase : List[str] = HfArgumentParser(_lowerCamelCase )
__lowercase : List[str] = parser.parse_args_into_dataclasses()[0]
__lowercase : Dict = Plot(args=_lowerCamelCase )
plot.plot()
if __name__ == "__main__":
main()
| 575 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : List[str] = '''BridgeTowerImageProcessor'''
SCREAMING_SNAKE_CASE : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
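        # tokenize the text first; image features are computed and merged into the same encoding below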
A = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Any ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 699 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(__lowercase )
class __magic_name__ ( __lowercase ):
"""simple docstring"""
def __init__( self :Optional[int] , **snake_case :int ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , "vision" )
self.check_model_type(UpperCamelCase__ )
def __call__( self :List[Any] , snake_case :Union[str, "Image.Image", List[Dict[str, Any]]] , snake_case :Union[str, List[str]] = None , **snake_case :Dict , ):
'''simple docstring'''
if "text_queries" in kwargs:
A_ : Dict = kwargs.pop("text_queries" )
if isinstance(UpperCamelCase__ , (str, Image.Image) ):
A_ : int = {"image": image, "candidate_labels": candidate_labels}
else:
A_ : List[str] = image
A_ : Tuple = super().__call__(UpperCamelCase__ , **UpperCamelCase__ )
return results
def SCREAMING_SNAKE_CASE ( self :List[str] , **snake_case :List[Any] ):
'''simple docstring'''
A_ : List[str] = {}
if "threshold" in kwargs:
A_ : List[str] = kwargs["threshold"]
if "top_k" in kwargs:
A_ : Any = kwargs["top_k"]
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE ( self :List[str] , snake_case :List[Any] ):
'''simple docstring'''
A_ : int = load_image(inputs["image"] )
A_ : Tuple = inputs["candidate_labels"]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
A_ : str = candidate_labels.split("," )
A_ : Dict = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
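        # yield one preprocessed (text, image) pair per candidate label; only the last one is flagged is_last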
for i, candidate_label in enumerate(UpperCamelCase__ ):
A_ : int = self.tokenizer(UpperCamelCase__ , return_tensors=self.framework )
A_ : Dict = self.image_processor(UpperCamelCase__ , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCamelCase__ ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"])[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
return bbox
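def _demo_get_bounding_box() -> None:
    # A minimal standalone sketch of the conversion done in `_get_bounding_box`
    # above; `box` is a fabricated example tensor, not real model output, and
    # torch is assumed to be available (see the conditional import at the top).
    box = torch.tensor([10.7, 20.2, 110.9, 220.5])
    xmin, ymin, xmax, ymax = box.int().tolist()
    assert {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax} == {
        "xmin": 10,
        "ymin": 20,
        "xmax": 110,
        "ymax": 220,
    }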
| 454 |
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
| 699 | 0 |
"""simple docstring"""
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
__snake_case = """main"""
# Default branch name
__snake_case = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
__snake_case = """aaaaaaa"""
# This commit does not exist, so we should 404.
__snake_case = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
__snake_case = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def context_en() -> Union[str, Any]:
"""simple docstring"""
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def context_fr() -> int:
"""simple docstring"""
print("Bonjour!" )
yield
print("Au revoir!" )
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self) -> Optional[Any]:
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers" ) is not None
class GenericUtilTests(unittest.TestCase):
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
    def test_context_managers_no_context(self, mock_stdout) -> Optional[int]:
'''simple docstring'''
with ContextManagers([] ):
print("Transformers are awesome!" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
    def test_context_managers_one_context(self, mock_stdout) -> List[Any]:
'''simple docstring'''
with ContextManagers([context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
    def test_context_managers_two_context(self, mock_stdout) -> List[Any]:
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
@require_torch
    def test_find_labels(self) -> Union[str, Any]:
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])
@require_tf
    def test_find_labels_tf(self) -> List[Any]:
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])
@require_flax
    def test_find_labels_flax(self) -> List[str]:
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
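    def _demo_context_managers(self) -> None:
        # A minimal illustration (added sketch, not part of the original test
        # file): `ContextManagers` enters the given managers left-to-right, so
        # wrapping context_fr() around context_en() prints
        # Bonjour! / Welcome! / Transformers are awesome! / Bye! / Au revoir!
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")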
| 178 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2_000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1, ):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(), )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time, ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length), )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask, )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon, ))

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))
    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None, ):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask, )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype)

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask, )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None, ):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1), )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044_715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
return x
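def _demo_film_modulation() -> None:
    # A standalone sketch of the FiLM modulation implemented by T5FiLMLayer above:
    # the conditioning embedding is projected to (scale, shift) and applied as
    # x * (1 + scale) + shift. The shapes below are illustrative assumptions.
    d_model, cond_dim = 8, 32
    scale_bias = nn.Linear(cond_dim, d_model * 2, bias=False)
    x = torch.randn(2, 5, d_model)      # (batch, seq, d_model)
    cond = torch.randn(2, 1, cond_dim)  # conditioning embedding
    scale, shift = torch.chunk(scale_bias(cond), 2, dim=-1)
    out = x * (1 + scale) + shift
    assert out.shape == (2, 5, d_model)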
| 699 | 0 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
DPRReaderOutput = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
CUSTOM_DPR_READER_DOCSTRING = r"""\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__( self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs, ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts), F"""There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."""
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['''input_ids''']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans( self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4, ) -> List[DPRSpanPrediction]:
        input_ids = reader_input['''input_ids''']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int, ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, F"""Wrong span indices: [{start_index}:{end_index}]"""
            length = end_index - start_index + 1
            assert length <= max_answer_length, F"""Span is too long: {length} > {max_answer_length}"""
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = DPRReaderTokenizer
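def _demo_get_best_spans_scoring() -> None:
    # A tiny standalone trace of the span scoring in `_get_best_spans` above:
    # every (start, start + length) pair is scored start_logit + end_logit
    # before overlap filtering. The logits here are fabricated for illustration.
    start_logits = [0.1, 2.0, 0.3]
    end_logits = [0.2, 1.5, 3.0]
    max_answer_length = 2
    scores = []
    for start_index, start_score in enumerate(start_logits):
        for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
            scores.append(((start_index, start_index + answer_length), start_score + end_score))
    best = sorted(scores, key=lambda x: x[1], reverse=True)[0]
    assert best == ((1, 2), 5.0)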
| 501 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark('bits', self.watermark)
    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, 'dwtDct') for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
return images
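def _demo_watermark_bits() -> None:
    # Standalone check of the WATERMARK_BITS construction above: bin() yields a
    # '0b...' string, [2:] strips the prefix, and each character becomes 0 or 1.
    # The message below is a fabricated example.
    message = 0b1011
    bits = [int(bit) for bit in bin(message)[2:]]
    assert bits == [1, 0, 1, 1]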
| 699 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__( self , questions , titles = None , texts = None , padding = False , truncation = False , max_length = None , return_tensors = None , return_attention_mask = None , **kwargs , ):
        if titles is None and texts is None:
            return super().__call__(
                questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
        titles = titles if not isinstance(titles , str) else [titles]
        texts = texts if not isinstance(texts , str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions , str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                F"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts.")
        encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False)['''input_ids''']
        encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False)['''input_ids''']
        encoded_inputs = {
            '''input_ids''': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['''attention_mask'''] = attention_mask
        return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors)
    def decode_best_spans( self , reader_input , reader_output , num_spans = 16 , max_answer_length = 64 , num_spans_per_passage = 4 , ):
        input_ids = reader_input['''input_ids''']
        start_logits , end_logits , relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages) , reverse=True , key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id , 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans( self , start_logits , end_logits , max_answer_length , top_spans , ):
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores , key=lambda x: x[1] , reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(F"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(F"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin , BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ['''input_ids''', '''attention_mask''']
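def _demo_attention_mask() -> None:
    # The attention mask built in `__call__` above is simply "1 where not pad";
    # a minimal standalone check (pad_token_id = 0 is an illustrative assumption).
    pad_token_id = 0
    input_ids = [101, 2054, 2003, 102, 0, 0]
    attention_mask = [int(input_id != pad_token_id) for input_id in input_ids]
    assert attention_mask == [1, 1, 1, 1, 0, 0]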
| 632 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type) -> None:
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer, attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model) -> None:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm) -> None:
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True ) -> None:
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config )
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
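def _demo_wildcard_mapping() -> None:
    # A standalone trace of the "*" substitution used in `recursively_load_weights`
    # above: the layer index parsed from the fairseq weight name is spliced into
    # the wildcard slot of the Hugging Face key. The name below is illustrative.
    name = "encoder.layers.3.self_attn.k_proj.weight"
    key, mapped_key = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]
    assert mapped_key.replace("*", layer_index) == "encoder.layers.3.attention.k_proj"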
| 699 | 0 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"
_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"
_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels) -> float:
    return float((preds == labels).mean() )


def acc_and_f1(preds, labels) -> dict:
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds) )
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels) -> dict:
    pearson_corr = float(pearsonr(preds, labels)[0] )
    spearman_corr = float(spearmanr(preds, labels)[0] )
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric ):
"""simple docstring"""
    def _info( self ):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute( self , predictions , references ):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references , predictions )}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions , references )
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions , references )
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions , references )}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
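def _demo_glue_helpers() -> None:
    # Standalone check of the helper functions above, using numpy arrays as the
    # metric's "numpy" format implies; the values are fabricated for illustration.
    import numpy as np

    preds, labels = np.array([0, 1, 1, 0]), np.array([0, 1, 0, 0])
    assert simple_accuracy(preds, labels) == 0.75
    assert acc_and_f1(preds, labels)["accuracy"] == 0.75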
| 509 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")
class Node(Generic[T] ):
    def __init__( self , data: T ):
        self.data = data
        self.next: Node[T] | None = None

    def __str__( self ):
        return f'''{self.data}'''


class LinkedStack(Generic[T] ):
    def __init__( self ):
        self.top: Node[T] | None = None

    def __iter__( self ) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__( self ):
        return "->".join([str(item ) for item in self] )

    def __len__( self ):
        return len(tuple(iter(self ) ) )

    def is_empty( self ) -> bool:
        return self.top is None

    def push( self , item: T ) -> None:
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop( self ) -> T:
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek( self ) -> T:
        if self.is_empty():
            raise IndexError('peek from empty stack' )

        assert self.top is not None
        return self.top.data

    def clear( self ) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
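# Usage sketch (illustrative, not part of the original file):
#
#     >>> stack = LinkedStack()
#     >>> stack.push(1)
#     >>> stack.push(2)
#     >>> len(stack)
#     2
#     >>> stack.peek()
#     2
#     >>> stack.pop()
#     2
#     >>> str(stack)
#     '1'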
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    """Configuration class to store the configuration of a PoolFormer model."""

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
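# Usage sketch (illustrative, not part of the original file):
#
#     >>> from transformers import PoolFormerConfig, PoolFormerModel
#     >>> configuration = PoolFormerConfig()      # sail/poolformer_s12-style defaults
#     >>> model = PoolFormerModel(configuration)  # randomly initialized model
#     >>> configuration = model.config            # access the model configuration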
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move at ``node_index``."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}


class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest common substring of ``text1`` and ``text2``."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
if __name__ == "__main__":
import doctest
doctest.testmod()
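# Usage sketch (illustrative, not part of the original file):
#
#     >>> longest_common_substring("abcdxyz", "xyzabcd")
#     'abcd'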
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# Dummy object raising an informative error when torch/scipy are missing.
# The class name below is an assumption (it follows the diffusers
# dummy_torch_and_scipy_objects pattern); the original name was mangled.
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
def actual_power(a: int, b: int) -> int:
    """Compute a**b for non-negative b by divide and conquer."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute a**b, supporting negative exponents."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates train/validation `DataLoader`s for GLUE MRPC using "bert-base-cased"."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__snake_case = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
# Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
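# Usage sketch (illustrative, not part of the original file): the script is meant
# to be launched with the `accelerate` CLI, e.g.
#
#     accelerate launch tracking.py --with_tracking --project_dir logs
#
# The file name `tracking.py` is an assumption; substitute whatever name this
# script is saved under.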
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of ``numbers``."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
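# Usage sketch (illustrative, not part of the original file):
#
#     >>> max_product_subarray([2, 3, -2, 4])
#     6
#     >>> max_product_subarray([-2, 0, -1])
#     0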
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
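# Usage sketch (illustrative, not part of the original file):
#
#     >>> from transformers import BioGptConfig, BioGptModel
#     >>> configuration = BioGptConfig()      # microsoft/biogpt-style defaults
#     >>> model = BioGptModel(configuration)  # randomly initialized model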
"""simple docstring"""
def __lowerCAmelCase ( lowercase : int , lowercase : int ) -> Optional[int]:
"""simple docstring"""
if b == 0:
return 1
if (b % 2) == 0:
return actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
else:
return a * actual_power(lowercase , int(b / 2 ) ) * actual_power(lowercase , int(b / 2 ) )
def __lowerCAmelCase ( lowercase : int , lowercase : int ) -> float:
"""simple docstring"""
if b < 0:
return 1 / actual_power(lowercase , lowercase )
return actual_power(lowercase , lowercase )
if __name__ == "__main__":
print(power(-2, -3))
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Decoded output sample of the model.
    """

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """
    Discretization bottleneck of a VQ-VAE: maps continuous latents onto the
    nearest entries of a learned codebook.
    """

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
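# Usage sketch (illustrative, not part of the original file): a minimal round trip
# through the distribution defined above, assuming 4 latent channels.
#
#     >>> import torch
#     >>> moments = torch.randn(1, 8, 4, 4)   # concatenated mean and logvar (2 * 4 channels)
#     >>> posterior = DiagonalGaussianDistribution(moments)
#     >>> latents = posterior.sample()        # shape (1, 4, 4, 4)
#     >>> kl = posterior.kl()                 # KL divergence to a standard normal, shape (1,)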
from math import isqrt


def is_prime(number: int) -> bool:
    """Return True if ``number`` is prime."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count the primes below ``max_prime`` that are differences of two
    consecutive cubes, i.e. of the form (k + 1)**3 - k**3 = 3*k*(k + 1) + 1."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(f"{solution() = }")
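# The candidates enumerated above are the differences of consecutive cubes,
# (k + 1)**3 - k**3 = 3*k*(k + 1) + 1, i.e. 7, 19, 37, 61, ... A quick check
# (illustrative, not part of the original file):
#
#     >>> [(k + 1) ** 3 - k**3 for k in range(1, 5)]
#     [7, 19, 37, 61]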
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = R"\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n            or scores for each vocabulary token after SoftMax.\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional stopping criteria specific kwargs.\n\n    Return:\n        `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class _UpperCAmelCase ( __lowercase ):
@add_start_docstrings(UpperCamelCase__)
def __call__( self , a__ , a__ , **a__):
raise NotImplementedError('''StoppingCriteria needs to be subclassed''')
class _UpperCAmelCase ( __lowercase ):
def __init__( self , a__ , a__ = None):
A__ = max_length
A__ = max_position_embeddings
@add_start_docstrings(UpperCamelCase__)
def __call__( self , a__ , a__ , **a__):
A__ = input_ids.shape[-1]
A__ = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
F"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
'''exceptions, performance degradation, or nothing at all.''')
return is_done
class _UpperCAmelCase ( __lowercase ):
def __init__( self , a__ , a__):
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
F"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
'''with `max_length = start_length + max_new_tokens` instead.''' , UpperCamelCase__ , )
A__ = start_length
A__ = max_new_tokens
A__ = start_length + max_new_tokens
@add_start_docstrings(UpperCamelCase__)
def __call__( self , a__ , a__ , **a__):
return input_ids.shape[-1] >= self.max_length
class _UpperCAmelCase ( __lowercase ):
def __init__( self , a__ , a__ = None):
A__ = max_time
A__ = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(UpperCamelCase__)
def __call__( self , a__ , a__ , **a__):
return time.time() - self.initial_timestamp > self.max_time
class _UpperCAmelCase ( __lowercase ):
@add_start_docstrings(UpperCamelCase__)
def __call__( self , a__ , a__ , **a__):
return any(criteria(UpperCamelCase__ , UpperCamelCase__) for criteria in self)
@property
def snake_case_ ( self):
for stopping_criterium in self:
if isinstance(UpperCamelCase__ , UpperCamelCase__):
return stopping_criterium.max_length
elif isinstance(UpperCamelCase__ , UpperCamelCase__):
return stopping_criterium.max_length
return None
def lowerCAmelCase__ ( UpperCamelCase_ : StoppingCriteriaList , UpperCamelCase_ : int )-> StoppingCriteriaList:
A__ = stopping_criteria.max_length
A__ = deepcopy(UpperCamelCase_ )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , UpperCamelCase_ )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=UpperCamelCase_ ) )
return new_stopping_criteria
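# Illustrative usage sketch (not part of the original file): cap generation at
# 20 tokens or 2 seconds, whichever is hit first, via `generate`'s
# `stopping_criteria` argument.
#
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=2.0)]
#   )
#   model.generate(input_ids, stopping_criteria=criteria)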
| 632 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 699 | 0 |
from __future__ import annotations

from random import random


class Node:
    """Treap node: stores a value, a random heap priority, and two children."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1
            )

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (left, right), with left holding values <= value."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps, assuming all values in left are <= values in right."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    # split off everything < value, then everything == value, and drop the middle
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("goodbye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
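# Illustrative, non-interactive sketch of the API above (not in the original):
#
#   root = None
#   for value in (5, 3, 8, 3):
#       root = insert(root, value)
#   root = erase(root, 3)  # removes every node holding the value 3
#   inorder(root)          # prints "5,8," -- remaining values in sorted order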
| 509 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    """A feed-forward layer: project in (with a plain or gated GELU), dropout, project out."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    """GELU activation with an input projection and an optional tanh approximation."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    """A gated GELU: the projection doubles the width, one half gates the other."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    """Sigmoid-based approximation of GELU: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    """Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    """Norm layer adaptive to timestep and class embeddings; also returns MSA/MLP gates."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    """GroupNorm whose scale and shift are computed from an embedding vector."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
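# Illustrative shape check for the feed-forward block above (a sketch, not in
# the original file): GEGLU projects `dim` up to 2 * mult * dim, gates one half
# with the other, and the final linear maps back to `dim`.
#
#   ff = FeedForward(dim=64, activation_fn="geglu")
#   out = ff(torch.randn(2, 16, 64))  # -> torch.Size([2, 16, 64])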
| 699 | 0 |
"""simple docstring"""
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple )-> str:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = {}
def a ( self : List[Any] )-> Any:
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(UpperCamelCase__ , """ -> """ , """ -> """.join([str(UpperCamelCase__ ) for j in self.vertex[i]] ) )
def a ( self : Optional[int] , a_ : int , a_ : int )-> Any:
"""simple docstring"""
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCamelCase__ )
else:
# else make a new vertex
UpperCAmelCase_ : Union[str, Any] = [to_vertex]
def a ( self : List[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Dict = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCamelCase__ , UpperCamelCase__ )
def a ( self : Optional[Any] , a_ : int , a_ : list )-> Tuple:
"""simple docstring"""
UpperCAmelCase_ : Any = True
print(UpperCamelCase__ , end=""" """ )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 470 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
_UpperCAmelCase = "</w>"
_UpperCAmelCase = "@@ "
def __UpperCamelCase (lowerCAmelCase : Optional[int] ) -> List[str]:
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
return pairs
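# Example (illustrative, not in the original): for the symbol tuple
# ("h", "e", "y"), get_pairs returns {("h", "e"), ("e", "y")} -- the
# adjacent-symbol pairs that BPE ranks for merging.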
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """
    Constructs a Speech2Text2Tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : str="<pad>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Tuple="<unk>" , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : List[str]=None , **UpperCamelCase__ : Optional[int] , ):
super().__init__(
unk_token=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , **UpperCamelCase__ , )
A = do_lower_case
with open(UpperCamelCase__ , encoding='utf-8' ) as vocab_handle:
A = json.load(UpperCamelCase__ )
A = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
A = None
A = None
else:
with open(UpperCamelCase__ , encoding='utf-8' ) as merges_handle:
A = merges_handle.read().split('\n' )[:-1]
A = [tuple(merge.split()[:2] ) for merge in merges]
A = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
A = {}
@property
def UpperCamelCase ( self : Union[str, Any] ):
return len(self.decoder )
def UpperCamelCase ( self : Optional[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Optional[int] ):
A = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
A = get_pairs(UpperCamelCase__ )
if not pairs:
return token
while True:
A = min(UpperCamelCase__ , key=lambda UpperCamelCase__ : self.bpe_ranks.get(UpperCamelCase__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(UpperCamelCase__ ):
try:
A = word.index(UpperCamelCase__ , UpperCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(UpperCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(UpperCamelCase__ )
A = new_word
if len(UpperCamelCase__ ) == 1:
break
else:
A = get_pairs(UpperCamelCase__ )
A = ' '.join(UpperCamelCase__ )
if word == "\n " + BPE_TOKEN_MERGES:
A = '\n' + BPE_TOKEN_MERGES
if word.endswith(UpperCamelCase__ ):
A = word.replace(UpperCamelCase__ , '' )
A = word.replace(' ' , UpperCamelCase__ )
A = word
return word
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : Dict ):
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
A = text.lower()
A = text.split()
A = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(UpperCamelCase__ ).split(' ' ) ) )
return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: list) -> str:
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_MERGES))

        return string
def UpperCamelCase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase__ , ensure_ascii=UpperCamelCase__ ) + '\n' )
A = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
' Please check that the tokenizer is not corrupted!' )
A = token_index
writer.write(' '.join(UpperCamelCase__ ) + '\n' )
index += 1
return (vocab_file, merges_file)
| 699 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
@property
def UpperCAmelCase_ ( self :Any )-> Dict:
return 32
@property
def UpperCAmelCase_ ( self :Tuple )-> str:
return 32
@property
def UpperCAmelCase_ ( self :Optional[int] )-> Optional[Any]:
return self.time_input_dim
@property
def UpperCAmelCase_ ( self :str )-> Dict:
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self :List[Any] )-> Dict:
return 1_00
@property
def UpperCAmelCase_ ( self :str )-> Dict:
torch.manual_seed(0 )
        model_kwargs = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
def UpperCAmelCase_ ( self :List[str] )-> Tuple:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCAmelCase_ ( self :List[str] )-> List[Any]:
torch.manual_seed(0 )
A__ = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCAmelCase_ ( self :int )-> Optional[Any]:
A__ = self.dummy_unet
A__ = self.dummy_movq
A__ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , prediction_type="epsilon" , thresholding=UpperCamelCase__ , )
A__ = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :Optional[int] , lowercase_ :List[Any]=0 )-> Optional[Any]:
A__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
A__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase__ )
if str(UpperCamelCase__ ).startswith("mps" ):
A__ = torch.manual_seed(UpperCamelCase__ )
else:
A__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
A__ = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def UpperCAmelCase_ ( self :Dict )-> Union[str, Any]:
A__ = "cpu"
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**UpperCamelCase__ )
A__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
A__ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
A__ = output.images
A__ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
A__ = image[0, -3:, -3:, -1]
A__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A__ = np.array(
[0.6_2_3_7_9_7_6, 1.0, 0.3_6_4_4_1_3_3_2, 1.0, 0.7_0_6_3_9_6_3_4, 0.2_9_8_7_7_1_8_6, 0.8_5_6_5_2_1_2_5, 0.5_2_1_6_8_4_3, 0.5_4_4_5_4_0_4_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
def UpperCAmelCase_ ( self :Union[str, Any] )-> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase_ ( self :str )-> Optional[Any]:
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
A__ = "red cat, 4k photo"
A__ = torch.Generator(device="cuda" ).manual_seed(0 )
A__, A__ = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
A__ = torch.Generator(device="cuda" ).manual_seed(0 )
A__ = pipeline(
image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=1_00 , output_type="np" , )
A__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 440 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
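# Illustrative usage sketch (assumes the `transformers` agents/tools API; not
# part of the original file):
#
#   classifier = TextClassificationTool()
#   classifier("This is a super nice API!", labels=["positive", "negative"])
#   # -> "positive"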
| 699 | 0 |
def hexagonal_numbers(length: int) -> list[int]:
    """
    Returns a list of the first ``length`` hexagonal numbers, h(n) = n * (2n - 1).
    """
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
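# Example (illustrative): hexagonal_numbers(5) -> [0, 1, 6, 15, 28],
# since h(n) = n * (2n - 1) for n = 0..4.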
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 61 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
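# Illustrative example of the renaming step (not in the original file): the
# regex collapses "<name>.<digit>" segments into "<name>_<digit>", e.g.
#
#   rename_key("down_blocks.0.attentions.1.proj.weight")
#   # -> "down_blocks_0.attentions_1.proj.weight"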
| 699 | 0 |
def join(separator: str, separated: list[str]) -> str:
    """
    Joins a list of strings with a separator.
    """
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    # the loop appends one trailing separator too many; strip it off
    return joined.strip(separator)
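# Example (illustrative): join("-", ["a", "b", "c"]) -> "a-b-c"; the final
# strip() removes the trailing "-" added on the last iteration.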
if __name__ == "__main__":
from doctest import testmod
testmod()
| 210 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """This class represents a vector of arbitrary size."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """This class represents an arbitrary matrix."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:  # error case
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
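# Illustrative usage sketch (not in the original file):
#
#   x = Vector([1, 2, 3])
#   y = Vector([1, 1, 1])
#   print(x + y)               # (2,3,4)
#   print(x * y)               # 6 -- the dot product
#   m = Matrix([[1, 0], [0, 1]], 2, 2)
#   print(m * Vector([3, 4]))  # (3,4) -- identity leaves the vector unchanged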
| 699 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
    """A convolutional block (convolution + batch norm + ReLU), as used by UperNet."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
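# Illustrative shape check (a sketch, not in the original file): a 3x3 conv
# with padding 1 keeps the spatial size, so
#
#   block = UperNetConvModule(3, 8, kernel_size=3, padding=1)
#   block(torch.randn(1, 3, 32, 32)).shape  # -> torch.Size([1, 8, 32, 32])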
class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale: int, in_channels: int, channels: int):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
def __init__( self : Dict , UpperCAmelCase_ : Tuple[int, ...] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : bool ):
super().__init__()
SCREAMING_SNAKE_CASE__ = pool_scales
SCREAMING_SNAKE_CASE__ = align_corners
SCREAMING_SNAKE_CASE__ = in_channels
SCREAMING_SNAKE_CASE__ = channels
SCREAMING_SNAKE_CASE__ = []
for i, pool_scale in enumerate(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE__ = UperNetPyramidPoolingBlock(pool_scale=UpperCamelCase__ , in_channels=UpperCamelCase__ , channels=UpperCamelCase__ )
self.blocks.append(UpperCamelCase__ )
self.add_module(str(UpperCamelCase__ ) , UpperCamelCase__ )
def A_ ( self : Optional[int] , UpperCAmelCase_ : torch.Tensor ):
SCREAMING_SNAKE_CASE__ = []
for ppm in self.blocks:
SCREAMING_SNAKE_CASE__ = ppm(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = nn.functional.interpolate(
UpperCamelCase__ , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(UpperCamelCase__ )
return ppm_outs
class UperNetHead(nn.Module):
def __init__( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any] ):
super().__init__()
SCREAMING_SNAKE_CASE__ = config
SCREAMING_SNAKE_CASE__ = config.pool_scales # e.g. (1, 2, 3, 6)
SCREAMING_SNAKE_CASE__ = in_channels
SCREAMING_SNAKE_CASE__ = config.hidden_size
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
SCREAMING_SNAKE_CASE__ = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
SCREAMING_SNAKE_CASE__ = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
SCREAMING_SNAKE_CASE__ = nn.ModuleList()
SCREAMING_SNAKE_CASE__ = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
SCREAMING_SNAKE_CASE__ = UperNetConvModule(UpperCamelCase__ , self.channels , kernel_size=1 )
SCREAMING_SNAKE_CASE__ = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(UpperCamelCase__ )
self.fpn_convs.append(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def A_ ( self : Optional[int] ):
self.apply(self._init_weights )
def A_ ( self : List[str] , UpperCAmelCase_ : Tuple ):
if isinstance(UpperCamelCase__ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict ):
SCREAMING_SNAKE_CASE__ = inputs[-1]
SCREAMING_SNAKE_CASE__ = [x]
psp_outs.extend(self.psp_modules(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ = torch.cat(UpperCamelCase__ , dim=1 )
SCREAMING_SNAKE_CASE__ = self.bottleneck(UpperCamelCase__ )
return output
def A_ ( self : Optional[int] , UpperCAmelCase_ : torch.Tensor ):
# build laterals
SCREAMING_SNAKE_CASE__ = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(UpperCamelCase__ ) )
# build top-down path
SCREAMING_SNAKE_CASE__ = len(UpperCamelCase__ )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__ = laterals[i - 1].shape[2:]
SCREAMING_SNAKE_CASE__ = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=UpperCamelCase__ , mode='bilinear' , align_corners=self.align_corners )
# build outputs
SCREAMING_SNAKE_CASE__ = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__ = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
SCREAMING_SNAKE_CASE__ = torch.cat(UpperCamelCase__ , dim=1 )
SCREAMING_SNAKE_CASE__ = self.fpn_bottleneck(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = self.classifier(UpperCamelCase__ )
return output
class UperNetFCNHead(nn.Module):
def __init__( self : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int = 2 , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : Union[int, Tuple[int, int]] = 1 ):
super().__init__()
SCREAMING_SNAKE_CASE__ = config
SCREAMING_SNAKE_CASE__ = config.auxiliary_in_channels
SCREAMING_SNAKE_CASE__ = config.auxiliary_channels
SCREAMING_SNAKE_CASE__ = config.auxiliary_num_convs
SCREAMING_SNAKE_CASE__ = config.auxiliary_concat_input
SCREAMING_SNAKE_CASE__ = in_index
SCREAMING_SNAKE_CASE__ = (kernel_size // 2) * dilation
SCREAMING_SNAKE_CASE__ = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=UpperCamelCase__ , padding=UpperCamelCase__ , dilation=UpperCamelCase__ ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=UpperCamelCase__ , padding=UpperCamelCase__ , dilation=UpperCamelCase__ ) )
if self.num_convs == 0:
SCREAMING_SNAKE_CASE__ = nn.Identity()
else:
SCREAMING_SNAKE_CASE__ = nn.Sequential(*UpperCamelCase__ )
if self.concat_input:
SCREAMING_SNAKE_CASE__ = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=UpperCamelCase__ , padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE__ = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def A_ ( self : Optional[Any] ):
self.apply(self._init_weights )
def A_ ( self : List[Any] , UpperCAmelCase_ : List[str] ):
if isinstance(UpperCamelCase__ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : torch.Tensor ):
# just take the relevant feature maps
SCREAMING_SNAKE_CASE__ = encoder_hidden_states[self.in_index]
SCREAMING_SNAKE_CASE__ = self.convs(UpperCamelCase__ )
if self.concat_input:
SCREAMING_SNAKE_CASE__ = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
SCREAMING_SNAKE_CASE__ = self.classifier(UpperCamelCase__ )
return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value
UPERNET_START_DOCSTRING = """\n    Parameters:\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n        Initializing with a config file does not load the weights associated with the model, only the\n        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"""

UPERNET_INPUTS_DOCSTRING = """\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n        output_attentions (`bool`, *optional*):\n            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n            `attentions` under returned tensors for more detail.\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n            returned tensors for more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC )
def A_ ( self : Dict , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , ):
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ = output_attentions if output_attentions is not None else self.config.output_attentions
SCREAMING_SNAKE_CASE__ = self.backbone.forward_with_filtered_kwargs(
UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , output_attentions=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = outputs.feature_maps
SCREAMING_SNAKE_CASE__ = self.decode_head(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = nn.functional.interpolate(UpperCamelCase__ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = None
if self.auxiliary_head is not None:
SCREAMING_SNAKE_CASE__ = self.auxiliary_head(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = nn.functional.interpolate(
UpperCamelCase__ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
SCREAMING_SNAKE_CASE__ = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
SCREAMING_SNAKE_CASE__ = loss_fct(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = loss_fct(UpperCamelCase__ , UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
SCREAMING_SNAKE_CASE__ = (logits,) + outputs[1:]
else:
SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
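# Illustrative usage sketch (not in the original file):
#
#   from transformers import AutoImageProcessor, UperNetForSemanticSegmentation
#   processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
#   model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   outputs.logits.shape  # (batch, num_labels, height, width)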
| 472 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig( OnnxSeq2SeqConfigWithPast ):
'''simple docstring'''
@property
    def inputs( self : List[Any] ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                common_inputs['decoder_input_ids'] = {0: 'batch'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
            else:
                common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
                common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[f'''past_key_values.{i}.key'''] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_inputs[f'''past_key_values.{i}.value'''] = {0: 'batch', 2: 'past_sequence + sequence'}
        else:
            common_inputs = OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
                    ('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
                    ('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
                ] )
        return common_inputs
@property
    def outputs( self : int ):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[f'''present.{i}.key'''] = {0: 'batch', 2: 'past_sequence + sequence'}
                    common_outputs[f'''present.{i}.value'''] = {0: 'batch', 2: 'past_sequence + sequence'}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seqaseq_lm( self : int , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch , encoder_seq_length = common_inputs['input_ids'].shape
                decoder_seq_length = common_inputs['decoder_input_ids'].shape[1]
                num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
                encoder_shape = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                decoder_past_length = decoder_seq_length + 3
                decoder_shape = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )
                common_inputs['decoder_attention_mask'] = torch.cat(
                    [common_inputs['decoder_attention_mask'], torch.ones(batch , decoder_past_length )] , dim=1 )
                common_inputs['past_key_values'] = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                num_encoder_layers , num_decoder_layers = self.num_layers
                min_num_layers = min(num_encoder_layers , num_decoder_layers )
                max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
                remaining_side_name = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
                for _ in range(min_num_layers ):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(decoder_shape ),
                            torch.zeros(decoder_shape ),
                            torch.zeros(encoder_shape ),
                            torch.zeros(encoder_shape ),
                        ) )
                # TODO: test this.
                shape = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
                for _ in range(min_num_layers , max_num_layers ):
                    common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self : Tuple , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                num_encoder_layers , _ = self.num_layers
                num_encoder_attention_heads , _ = self.num_attention_heads
                past_shape = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                mask_dtype = common_inputs['attention_mask'].dtype
                common_inputs['attention_mask'] = torch.cat(
                    [common_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
                common_inputs['past_key_values'] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
                ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self : List[str] , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self : Any , tokenizer : PreTrainedTokenizer , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional[TensorType] = None , ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self : Optional[Any] , flattened_output : int , name : List[str] , idx : str , t : Tuple ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
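# Hedged usage sketch for the ONNX config above. Assumptions: the standard transformers
# export flow; the tokenizer checkpoint is illustrative.
from transformers import BlenderbotSmallTokenizer
_tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M')
_onnx_cfg = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig() , task='seq2seq-lm')
_dummy = _onnx_cfg.generate_dummy_inputs(_tok , framework=TensorType.PYTORCH)
print(sorted(_dummy.keys()))  # input_ids, attention_mask and the decoder_* counterparts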
| 699 | 0 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args() ->Optional[int]:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True, help="Path to pretrained model or model identifier from huggingface.co/models.", )
    parser.add_argument(
        "-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.", )
    parser.add_argument(
        "-n", "--images_num", type=int, default=4, help="How many images to generate.", )
    parser.add_argument(
        "-s", "--seed", type=int, default=42, help="Seed for random process.", )
    parser.add_argument(
        "-ci", "--cuda_id", type=int, default=0, help="cuda_id.", )
    args = parser.parse_args()
    return args
def image_grid( imgs, rows, cols ) ->Dict:
    """simple docstring"""
    if not len(imgs ) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct." )
    w , h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h) )
    for i, img in enumerate(imgs ):
        grid.paste(img, box=(i % cols * w, i // cols * h) )
    return grid
def generate_images( pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42, ) ->List[Any]:
    """simple docstring"""
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt, ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows )
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
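# Hedged invocation sketch (assumption: a diffusers model directory that was quantized
# with neural-compressor, so best_model.pt sits next to the usual subfolders):
#   python text2images.py -m ./quantized-stable-diffusion -c "robotic cat with wings" -n 4 -s 42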
| 575 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor( ProcessorMixin ):
    '''simple docstring'''
    attributes : Any = ['''image_processor''', '''tokenizer''']
    image_processor_class : List[str] = '''BridgeTowerImageProcessor'''
    tokenizer_class : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
    def __init__( self : Dict , image_processor : Optional[int] , tokenizer : Optional[int] ):
        super().__init__(image_processor , tokenizer )
    def __call__( self : Optional[int] , images : List[str] , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : List[Any] , ):
        encoding = self.tokenizer(
            text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images , return_tensors=return_tensors , do_normalize=True , do_center_crop=True , **kwargs )
        encoding.update(encoding_image_processor )
        return encoding
    def batch_decode( self : Dict , *args : Optional[Any] , **kwargs : Any ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : int , *args : int , **kwargs : List[str] ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self : Any ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
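# Hedged usage sketch (assumptions: a standard BridgeTower checkpoint on the Hub; the
# image here is synthetic).
from PIL import Image as _Image
from transformers import BridgeTowerProcessor as _HubProcessor
_proc = _HubProcessor.from_pretrained('BridgeTower/bridgetower-base')
_inputs = _proc(_Image.new('RGB', (384, 384)) , 'a synthetic gray image' , return_tensors='pt')
print(sorted(_inputs.keys()))  # input_ids, attention_mask, pixel_values, pixel_mask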
| 699 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser( subparsers : Tuple=None ) -> int:
    if subparsers is not None:
        parser = subparsers.add_parser("env" )
    else:
        parser = argparse.ArgumentParser("Accelerate env command" )
    parser.add_argument(
        "--config_file" , default=None , help="The config file to use for the default values in the launching script." )
    if subparsers is not None:
        parser.set_defaults(func=env_command )
    return parser
def env_command( args : Union[str, Any] ) -> Tuple:
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available ),
        "PyTorch NPU available": str(pt_npu_available ),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()
    print("\nCopy-and-paste the text below in your GitHub issue\n" )
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()] ) )
    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str )
    info["`Accelerate` configs"] = accelerate_config
    return info
def main() -> int:
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
raise SystemExit(main())
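# Hedged invocation sketch (assumption: this module is wired into accelerate's CLI,
# which is how upstream uses env_command_parser via subparsers):
#   accelerate env --config_file ~/.cache/huggingface/accelerate/default_config.yaml
# Running the file directly prints the same report through main().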
| 454 |
def multiplication_table(number : int, number_of_terms : int ) -> str:
    return "\n".join(
        f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
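# Quick doctest-style check of the format produced above.
assert multiplication_table(3, 2) == "3 * 1 = 3\n3 * 2 = 6"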
| 699 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length( fnc : Callable[[int | float], int | float] , x_start : int | float , x_end : int | float , steps : int = 100 , ) -> float:
    """simple docstring"""
    xa = x_start
    fxa = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximates curve as a sequence of linear lines and sums their length
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next )
        length += math.hypot(xa_next - xa , fxa_next - fxa )
        # Increment step
        xa = xa_next
        fxa = fxa_next
    return length
if __name__ == "__main__":
    def f( x : List[str] ) -> Dict:
        """simple docstring"""
        return math.sin(10 * x )
    print("""f(x) = sin(10 * x)""")
    print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
    while i <= 100000:
        print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
        i *= 10
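# Hedged sanity check: the piecewise-linear approximation is exact for a straight line,
# since every segment of y = x contributes hypot(dx, dx).
assert abs(line_length(lambda x: x, 0, 3, 10) - 3 * math.sqrt(2)) < 1e-9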
| 178 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self : Any , input_dims : int = 128 , targets_length : int = 256 , max_decoder_noise_time : float = 2_000.0 , d_model : int = 768 , num_layers : int = 12 , num_heads : int = 12 , d_kv : int = 64 , d_ff : int = 2048 , dropout_rate : float = 0.1 , ):
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
        self.position_encoding = nn.Embedding(targets_length , d_model )
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
        self.dropout = nn.Dropout(p=dropout_rate )
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers ):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
            self.decoders.append(lyr )
        self.decoder_norm = TaLayerNorm(d_model )
        self.post_dropout = nn.Dropout(p=dropout_rate )
        self.spec_out = nn.Linear(d_model , input_dims , bias=False )
    def encoder_decoder_mask( self : Tuple , query_input : Any , key_input : int ):
        mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )
    def forward( self : Dict , encodings_and_masks : Union[str, Any] , decoder_input_tokens : Any , decoder_noise_time : Optional[int] ):
        batch , _ , _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
        conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
        position_encodings = self.position_encoding(decoder_positions )
        inputs = self.continuous_inputs_projection(decoder_input_tokens )
        inputs += position_encodings
        y = self.dropout(inputs )
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y_mask )) for x, y_mask in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
        for lyr in self.decoders:
            y = lyr(
                y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
        y = self.decoder_norm(y )
        y = self.post_dropout(y )
        spec_out = self.spec_out(y )
        return spec_out
class DecoderLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self : Union[str, Any] , d_model : List[str] , d_kv : List[str] , num_heads : Optional[Any] , d_ff : List[str] , dropout_rate : Tuple , layer_norm_epsilon : Union[str, Any]=1e-6 ):
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
    def forward( self : Dict , hidden_states : int , conditioning_emb : Optional[Any]=None , attention_mask : Optional[int]=None , encoder_hidden_states : int=None , encoder_attention_mask : List[Any]=None , encoder_decoder_position_bias : List[Any]=None , ):
        hidden_states = self.layer[0](
            hidden_states , conditioning_emb=conditioning_emb , attention_mask=attention_mask , )
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
                encoder_hidden_states.dtype )
            hidden_states = self.layer[1](
                hidden_states , key_value_states=encoder_hidden_states , attention_mask=encoder_extended_attention_mask , )
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states , conditioning_emb )
        return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
    '''simple docstring'''
    def __init__( self : Tuple , d_model : Tuple , d_kv : str , num_heads : Union[str, Any] , dropout_rate : Dict ):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model )
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self : int , hidden_states : str , conditioning_emb : Optional[Any]=None , attention_mask : Tuple=None , ):
        # pre_self_attention_layer_norm
        normed_hidden_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states , conditioning_emb )
        # Self-attention block
        attention_output = self.attention(normed_hidden_states )
        hidden_states = hidden_states + self.dropout(attention_output )
        return hidden_states
class TaLayerCrossAttention( nn.Module ):
    '''simple docstring'''
    def __init__( self : List[str] , d_model : Optional[Any] , d_kv : Dict , num_heads : Optional[int] , dropout_rate : Tuple , layer_norm_epsilon : Optional[Any] ):
        super().__init__()
        self.attention = Attention(query_dim=d_model , heads=num_heads , dim_head=d_kv , out_bias=False , scale_qk=False )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self : Optional[Any] , hidden_states : Dict , key_value_states : List[Any]=None , attention_mask : List[str]=None , ):
        normed_hidden_states = self.layer_norm(hidden_states )
        attention_output = self.attention(
            normed_hidden_states , encoder_hidden_states=key_value_states , attention_mask=attention_mask.squeeze(1 ) , )
        layer_output = hidden_states + self.dropout(attention_output )
        return layer_output
class TaLayerFFCond( nn.Module ):
    '''simple docstring'''
    def __init__( self : Any , d_model : Optional[Any] , d_ff : Optional[Any] , dropout_rate : Union[str, Any] , layer_norm_epsilon : Any ):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate )
        self.film = TaFiLMLayer(in_features=d_model * 4 , out_features=d_model )
        self.layer_norm = TaLayerNorm(d_model , eps=layer_norm_epsilon )
        self.dropout = nn.Dropout(dropout_rate )
    def forward( self : int , hidden_states : Union[str, Any] , conditioning_emb : Any=None ):
        forwarded_states = self.layer_norm(hidden_states )
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states , conditioning_emb )
        forwarded_states = self.DenseReluDense(forwarded_states )
        hidden_states = hidden_states + self.dropout(forwarded_states )
        return hidden_states
class TaDenseGatedActDense( nn.Module ):
    '''simple docstring'''
    def __init__( self : Any , d_model : Any , d_ff : Union[str, Any] , dropout_rate : int ):
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self : Optional[Any] , hidden_states : List[Any] ):
        # gated GELU: elementwise product of an activated and a linear projection
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module ):
    '''simple docstring'''
    def __init__( self : int , hidden_size : Union[str, Any] , eps : Tuple=1e-6 ):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self : Optional[int] , hidden_states : int ):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
class NewGELUActivation( nn.Module ):
    '''simple docstring'''
    def forward( self : Any , input : torch.Tensor ):
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer( nn.Module ):
    '''simple docstring'''
    def __init__( self : Dict , in_features : Dict , out_features : int ):
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self : Tuple , x : Union[str, Any] , conditioning_emb : List[Any] ):
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
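# Hedged shape check for the FiLM conditioning above (toy sizes, d_model=8).
# TaFiLMLayer turns the conditioning embedding into a per-channel scale and shift.
_film = TaFiLMLayer(in_features=8 * 4 , out_features=8)
_x , _cond = torch.randn(2 , 5 , 8) , torch.randn(2 , 1 , 8 * 4)
assert _film(_x , _cond).shape == (2, 5, 8)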
| 699 | 0 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number ):
    """simple docstring"""
    if number < 0:
        raise ValueError('''the value of input must not be negative''' )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number ):
    """simple docstring"""
    if number < 0:
        raise ValueError('''the value of input must not be negative''' )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark():
    """simple docstring"""
    def do_benchmark(number ) -> None:
        setup = '''import __main__ as z'''
        print(F"""Benchmark when {number = }:""" )
        print(F"""{get_set_bits_count_using_modulo_operator(number ) = }""" )
        timing = timeit(F"z.get_set_bits_count_using_modulo_operator({number})" , setup=setup )
        print(F"""timeit() runs in {timing} seconds""" )
        print(F"""{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }""" )
        timing = timeit(
            F"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=setup , )
        print(F"""timeit() runs in {timing} seconds""" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
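# Hedged cross-check against CPython's built-in popcount (assumption: Python >= 3.10,
# where int.bit_count exists).
import sys
if sys.version_info >= (3, 10):
    for _n in (0, 1, 25, 37, 58, 2**31 - 1):
        assert get_set_bits_count_using_brian_kernighans_algorithm(_n ) == _n.bit_count()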
| 501 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker :
    '''simple docstring'''
    def __init__( self : Union[str, Any] ):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark('bits' , self.watermark )
    def apply_watermark( self : Optional[int] , images : torch.FloatTensor ):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , 'dwtDct' ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
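# Hedged round-trip sketch (assumptions: invisible-watermark's 'dwtDct' method is
# installed, and inputs live in [-1, 1] like decoded diffusers images).
_wm = StableDiffusionXLWatermarker()
_out = _wm.apply_watermark(torch.zeros(1 , 3 , 256 , 256 ))
assert _out.shape == (1, 3, 256, 256) and _out.min() >= -1.0 and _out.max() <= 1.0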
| 699 | 0 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode :
    def __init__( self , start , end , val , left=None , right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right
    def __repr__( self):
        return F"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class SegmentTree :
    def __init__( self , collection , function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0 , len(self.collection) - 1)
    def update( self , i , val):
        self._update_tree(self.root , i , val)
    def query_range( self , i , j):
        return self._query_range(self.root , i , j)
    def _build_tree( self , start , end):
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start , mid)
        right = self._build_tree(mid + 1 , end)
        return SegmentTreeNode(start , end , self.fn(left.val , right.val) , left , right)
    def _update_tree( self , node , i , val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val)
        else:
            self._update_tree(node.right , i , val)
        node.val = self.fn(node.left.val , node.right.val)
    def _query_range( self , node , i , j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , i , j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , i , node.mid) , self._query_range(node.right , node.mid + 1 , j) , )
        else:
            # range in right child tree
            return self._query_range(node.right , i , j)
    def traverse( self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
_lowercase = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 632 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ) -> int:
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model , hf_model ) -> Dict:
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*', layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ) -> Dict:
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ) -> Union[str, Any]:
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
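# Hedged invocation sketch (assumption: a fairseq UniSpeech-SAT checkpoint on disk):
#   python convert_unispeech_sat_checkpoint.py --checkpoint_path unispeech_sat.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf --not_finetuned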
| 699 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem( AbstractFileSystem ):
    """simple docstring"""
    root_marker : List[Any] = ''''''
    protocol : List[str] = '''hf-legacy''' # "hf://"" is reserved for hffs
    def __init__( self : List[str] , repo_info : Optional[DatasetInfo] = None , token : Optional[str] = None , **kwargs : Dict , ):
        super().__init__(self , **kwargs )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self : str ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    '''name''': hf_file.rfilename,
                    '''size''': None,
                    '''type''': '''file''',
                }
                self.dir_cache.update(
                    {
                        str(d ): {'''name''': str(d ), '''size''': None, '''type''': '''directory'''}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
    def _open( self : int , path : str , mode : str = "rb" , **kwargs : int , ):
        if not isinstance(self.repo_info , DatasetInfo ):
            raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}' )
        url = hf_hub_url(self.repo_info.id , path , revision=self.repo_info.sha )
        return fsspec.open(
            url , mode=mode , headers=get_authentication_headers_for_url(url , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
    def info( self : int , path : int , **kwargs : List[Any] ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self : Dict , path : Union[str, Any] , detail : Any=False , **kwargs : int ):
        self._get_dirs()
        path = PurePosixPath(path.strip('''/''' ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip('''/''' ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f['''name'''] for f in out )
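# Hedged usage sketch (assumption: repo_info normally comes from
# huggingface_hub.HfApi().dataset_info(...); a minimal stand-in exercises the cache).
from types import SimpleNamespace
_info = SimpleNamespace(siblings=[SimpleNamespace(rfilename='data/train.csv')] , id='demo' , sha=None)
_fs = HfFileSystem(repo_info=_info )
assert _fs.ls('' ) == ['data']
assert _fs.info('data/train.csv' )['type'] == 'file'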
| 509 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
_UpperCAmelCase = TypeVar("T")
class Node( Generic[T] ):
    '''simple docstring'''
    def __init__( self : Tuple , data : T ):
        self.data = data
        self.next = None
    def __str__( self : Optional[int] ):
        return f'''{self.data}'''
class Stack( Generic[T] ):
    '''simple docstring'''
    def __init__( self : Tuple ):
        self.top = None
    def __iter__( self : int ):
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self : Any ):
        return "->".join([str(item ) for item in self] )
    def __len__( self : Dict ):
        return len(tuple(iter(self ) ) )
    def is_empty( self : List[str] ):
        return self.top is None
    def push( self : Dict , item : T ):
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self : Dict ):
        if self.is_empty():
            raise IndexError('pop from empty stack' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self : List[str] ):
        if self.is_empty():
            raise IndexError('peek from empty stack' )
        assert self.top is not None
        return self.top.data
    def clear( self : List[str] ):
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
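# Hedged usage sketch (LIFO semantics as implemented above).
_s = Stack[int]()
for _v in (1, 2, 3):
    _s.push(_v )
assert str(_s ) == '3->2->1' and _s.peek() == 3
assert _s.pop() == 3 and len(_s ) == 2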
| 699 | 0 |
"""simple docstring"""
def solution( n = 1000 ) -> int:
    """simple docstring"""
    fa , fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa , fb = fb, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
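# Hedged spot checks (assumption: 1-based index over 1, 1, 2, 3, ..., the Project
# Euler 25 convention).
assert solution(3 ) == 12 # 144 is the first 3-digit Fibonacci number
assert solution(1000 ) == 4782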
| 470 |
from __future__ import annotations
import math
def minimax(depth : int, node_index : int, is_max : bool, scores : list[int], height : float ) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if not scores:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height ), minimax(depth + 1, node_index * 2 + 1, False, scores, height ), )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height ), minimax(depth + 1, node_index * 2 + 1, True, scores, height ), )
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores ), 2 )
    print(f'''Optimal value : {minimax(0, 0, True, scores, height )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
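# Hedged worked example: with the maximizer at the root, the value is the larger of
# the two leaf-pair minima, max(min(3, 5), min(2, 9)) == 3.
_scores = [3, 5, 2, 9]
assert minimax(0 , 0 , True , _scores , math.log(len(_scores ), 2 ) ) == 3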
| 699 | 0 |
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel( pl.LightningModule ):
    def __init__( self :List[str] , model :str )-> List[str]:
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size , self.num_labels )
    def forward( self :int )-> Tuple:
        pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model : str , longformer_question_answering_ckpt_path : str , pytorch_dump_folder_path : str ):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : Optional[int] =parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
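# Hedged invocation sketch (assumption: a Lightning checkpoint trained with the QA head above):
#   python convert_longformer_qa_checkpoint.py --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path qa.ckpt --pytorch_dump_folder_path ./longformer-qa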
| 440 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(train_file : str, eval_file : str, test_file : str, tokenizer : PreTrainedTokenizer, label_column_id : int, max_seq_length : Optional[int] = None, ) -> Dict:
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv', data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    labelaid = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length' ), batched=True, )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example : tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='max_length', ), batched=True, )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A = {k: v for k, v in ex.items() if k in input_names}
A = labelaid[ex[label_name]]
yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )), )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, labelaid
_UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
'''simple docstring'''
    label_column_id : int = field(metadata={'''help''': '''Which column contains the label'''} )
    train_file : str = field(default=None , metadata={'''help''': '''The path of the training file'''} )
    dev_file : Optional[str] = field(default=None , metadata={'''help''': '''The path of the development file'''} )
    test_file : Optional[str] = field(default=None , metadata={'''help''': '''The path of the test file'''} )
    max_seq_length : int = field(
        default=1_28 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache : bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class ModelArguments :
'''simple docstring'''
    model_name_or_path : str = field(
        metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name : Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    use_fast : bool = field(default=False , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir : Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def main() -> Any:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
' --overwrite_output_dir to overcome.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO, )
logger.info(
f'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
f'''16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    train_dataset , eval_dataset , test_ds , labelaid = get_tfds(
        train_file=data_args.train_file, eval_file=data_args.dev_file, test_file=data_args.test_file, tokenizer=tokenizer, label_column_id=data_args.label_column_id, max_seq_length=data_args.max_seq_length, )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=len(labelaid ), label2id=labelaid, id2label={id: label for label, id in labelaid.items()}, finetuning_task='text-classification', cache_dir=model_args.cache_dir, )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path, from_pt=bool('.bin' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, )
    def compute_metrics(p : EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions, axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, 'eval_results.txt' )
        with open(output_eval_file, 'w' ) as writer:
            logger.info('***** Eval results *****' )
            for key, value in result.items():
                logger.info(f''' {key} = {value}''' )
                writer.write(f'''{key} = {value}\n''' )
        results.update(result )
    return results
if __name__ == "__main__":
main()
| 699 | 0 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    """
    Fourth-order Improved Pseudo Linear Multistep scheduler (iPNDM).
    """

    order = 1

    @register_to_config
    def __init__(
        self, num_train_timesteps: int = 1000, trained_betas: Optional[Union[np.ndarray, List[float]]] = None
    ):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4

        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])

        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2

        self.alphas = (1.0 - self.betas**2) ** 0.5

        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)

        self.ets = []

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1

        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)

        # blend the stored epsilon predictions with linear multistep (Adams-Bashforth) coefficients
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])

        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample: torch.FloatTensor, *args, **kwargs) -> torch.FloatTensor:
        # iPNDM does not rescale the model input
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]

        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]

        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma

        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
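# The multistep weights in `step` above are the classic Adams-Bashforth coefficients;
# at every order they sum to 1, so the blended prediction stays on the same scale as a
# single model output. A minimal sanity check (hypothetical, not part of the library):
if __name__ == "__main__":
    for coeffs in ([1.0], [3 / 2, -1 / 2], [23 / 12, -16 / 12, 5 / 12], [55 / 24, -59 / 24, 37 / 24, -9 / 24]):
        assert abs(sum(coeffs) - 1.0) < 1e-12, coeffs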
| 61 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
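# A note on the pattern above: importing this package only builds `_import_structure`
# (a mapping of submodule name -> exported symbols); `_LazyModule` resolves each symbol
# on first attribute access, so torch/tf/flax are only imported when actually used.
# Minimal sketch of the same idea (hypothetical, independent of transformers):
#
#     import importlib
#     import types
#
#     class MiniLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._attr_to_module = {
#                 attr: mod for mod, attrs in import_structure.items() for attr in attrs
#             }
#
#         def __getattr__(self, attr):
#             submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
#             return getattr(submodule, attr)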
| 699 | 0 |
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution, optionally squaring p first."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
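# Hypothetical usage example: a uniform distribution over n outcomes has entropy
# log(n), the maximum for that support size.
#   entropy(torch.full((4,), 0.25))         # tensor(1.3863) ~= math.log(4)
#   entropy(torch.tensor([1.0, 0.0, 0.0]))  # tensor(0.) -- the p == 0 guard avoids nan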
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logged row per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) in order of increasing importance until the score
    drops below `masking_threshold` times the original score."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune (actually remove) the masked heads and measure the effect on parameters,
    score and timing."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)

    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 210 |
def actual_power(a: int, b: int) -> int:
    """Divide and conquer calculation of a**b for a non-negative integer exponent b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Extend actual_power to negative exponents via a**(-b) == 1 / a**b."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))  # -0.125
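# Note: the recursion above recomputes actual_power(a, b // 2) twice per level, so it
# performs O(b) multiplications overall. Caching the half power restores the expected
# O(log b) behaviour -- a minimal sketch of that variant (not part of the original):
def fast_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    half = fast_power(a, b // 2)  # compute the half power once and reuse it
    return half * half if b % 2 == 0 else a * half * half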
| 699 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 472 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product obtainable from a contiguous subarray of `numbers`."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
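# Example usage (kept small on purpose): the sign swap above is what lets a negative
# number turn the current minimum product into a new maximum.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6    # subarray [2, 3]
    assert max_product_subarray([-2, 0, -1]) == 0
    assert max_product_subarray([-4, -3, -2]) == 12    # subarray [-4, -3]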
| 699 | 0 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )

        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 575 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 699 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 454 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BioGPT model."""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
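# Minimal usage sketch (assuming the transformers-style PretrainedConfig machinery is
# available): instantiate with defaults, or override individual fields.
#   config = BioGptConfig()
#   small = BioGptConfig(num_hidden_layers=6, hidden_size=512)
#   print(config.model_type, small.num_hidden_layers)  # biogpt 6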
| 699 | 0 |
"""simple docstring"""
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 178 |
import sys
def matrix_chain_order(array):
    """Dynamic programming solution to matrix chain multiplication: matrix[a][b] holds
    the minimal number of scalar multiplications needed to compute A_a ... A_b, and
    sol[a][b] the split point achieving it."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Recursively print the fully parenthesized optimal multiplication order."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
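# For the sample dimensions [30, 35, 15, 5, 10, 20, 25] the DP above reports 15125
# scalar multiplications with the split (( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 )), the
# classic CLRS example. The table fill runs in O(n^3) time and O(n^2) space.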
| 699 | 0 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2_000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2_048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU()
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)

        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = T5LayerNorm(d_model)

        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)

    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length)
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1e-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre-self-attention layer norm
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1)
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        Construct a layernorm module in the T5 style. No bias and no subtraction of mean.
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5 only scales and does not shift, i.e. Root Mean Square Layer Normalization
        # (https://arxiv.org/abs/1910.07467): variance is computed without the mean and
        # there is no bias. The accumulation for half-precision inputs is done in fp32.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    """
    Tanh approximation of the GELU activation, as used in Google BERT and OpenAI GPT.
    See the Gaussian Error Linear Units (GELU) paper: https://arxiv.org/abs/1606.08415
    """

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    """
    FiLM layer: a feature-wise affine transform conditioned on an external embedding.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
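# FiLM (feature-wise linear modulation) predicts a per-channel scale and shift from the
# conditioning signal. A quick shape check with hypothetical values:
#   film = T5FiLMLayer(in_features=3072, out_features=768)
#   x = torch.randn(2, 16, 768)
#   cond = torch.randn(2, 1, 3072)
#   film(x, cond).shape  # torch.Size([2, 16, 768]) -- scale/shift broadcast over time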
| 501 |
from math import isqrt
def is_prime(number: int) -> bool:
    """Trial division up to isqrt(number); assumes number >= 2."""
    return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1))


def solution(max_prime: int = 10**6) -> int:
    """Count primes below max_prime that are a difference of two consecutive cubes."""
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate)

        cube_index += 1
        prime_candidate += 6 * cube_index

    return primes_count


if __name__ == "__main__":
    print(F'''{solution() = }''')
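# Why the candidates step by 6 * cube_index: the difference of consecutive cubes is
#     (k + 1)^3 - k^3 = 3k^2 + 3k + 1,
# and stepping k -> k + 1 adds
#     (3(k + 1)^2 + 3(k + 1) + 1) - (3k^2 + 3k + 1) = 6k + 6 = 6 * (k + 1),
# which is exactly the `6 * cube_index` increment applied after advancing cube_index.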
| 699 | 0 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search; returns every vertex reachable from start."""
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
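# With the graph above, every vertex is reachable from "A", so the call returns the
# full vertex set {"A", ..., "G"}. Pushing reversed(graph[v]) means the last element
# pushed is the first listed neighbour, so pops visit neighbours in listed order.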
| 632 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 699 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 509 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    def __init__(self, dim: int, num_attention_heads: int, attention_head_dim: int, dropout=0.0, cross_attention_dim: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None, attention_bias: bool = False, only_cross_attention: bool = False, double_self_attention: bool = False, upcast_attention: bool = False, norm_elementwise_affine: bool = True, norm_type: str = "layer_norm", final_dropout: bool = False):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, cross_attention_dim=cross_attention_dim if only_cross_attention else None, upcast_attention=upcast_attention
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim, cross_attention_dim=cross_attention_dim if not double_self_attention else None, heads=num_attention_heads, dim_head=attention_head_dim, dropout=dropout, bias=attention_bias, upcast_attention=upcast_attention
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(self, hidden_states: torch.FloatTensor, attention_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, timestep: Optional[torch.LongTensor] = None, cross_attention_kwargs: Dict[str, Any] = None, class_labels: Optional[torch.LongTensor] = None):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states, encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, attention_mask=attention_mask, **cross_attention_kwargs
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=encoder_attention_mask, **cross_attention_kwargs
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], dim=self._chunk_dim
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    def __init__(self, dim: int, dim_out: Optional[int] = None, mult: int = 4, dropout: float = 0.0, activation_fn: str = "geglu", final_dropout: bool = False):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""
    GELU activation function with tanh approximation support via `approximate="tanh"`.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    """The sigmoid approximation of GELU: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    """Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    """Norm layer adaptive to timestep and class embeddings (adaLN-Zero)."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    """GroupNorm layer modified to incorporate a conditioning embedding."""

    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
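# A minimal standalone sketch (not part of the library code above) showing why
# the chunked feed-forward path in the transformer block is a pure memory
# trade-off: the MLP acts on each position independently, so splitting the
# sequence dimension into chunks and concatenating the outputs matches a
# single full pass. All names and sizes below are illustrative.
if __name__ == "__main__":
    _ff = nn.Sequential(nn.Linear(8, 32), nn.GELU(), nn.Linear(32, 8))
    _x = torch.randn(2, 6, 8)  # (batch, seq_len, dim)
    _chunk_size, _chunk_dim = 2, 1
    _num_chunks = _x.shape[_chunk_dim] // _chunk_size
    _chunked = torch.cat([_ff(s) for s in _x.chunk(_num_chunks, dim=_chunk_dim)], dim=_chunk_dim)
    assert torch.allclose(_ff(_x), _chunked, atol=1e-6)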
| 699 | 0 |
"""simple docstring"""
import sys
def matrix_chain_order(array):
    """Build the DP tables for the matrix-chain multiplication problem."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Print the optimal parenthesization recursively, e.g. (A1(A2A3))."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
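# Hedged cross-check (not in the original script): a tiny brute-force recursion
# over the same recurrence, handy for validating the DP table on small inputs.
# For dims = [30, 35, 15, 5, 10, 20, 25] both approaches give 15125.
def _mcm_bruteforce(dims, i, j):
    if i == j:
        return 0
    return min(
        _mcm_bruteforce(dims, i, k)
        + _mcm_bruteforce(dims, k + 1, j)
        + dims[i - 1] * dims[k] * dims[j]
        for k in range(i, j)
    )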
| 470 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
_UpperCAmelCase = {"facebook/s2t-wav2vec2-large-en-de": 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    """Speech2Text2 tokenizer; decoding-only when no merges file is provided."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>", do_lower_case=False, merges_file=None, **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )
        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)

        word = " ".join(word)
        if word == "\n  " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens):
        string = " ".join(tokens)
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
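# Small illustrative check of the BPE helper above (inputs are made up, not
# taken from a real merges file):
#   get_pairs(("l", "o", "w", "</w>"))
#   -> {("l", "o"), ("o", "w"), ("w", "</w>")}
# The bpe() loop repeatedly merges the pair with the lowest rank in
# `bpe_ranks` until no ranked pair remains.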
| 699 | 0 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
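    # Note (not from the original script): on a Google Scholar result page the
    # third anchor inside the "gs_fl" footer is usually the "Cited by N" link,
    # which is why `anchors[2].get_text()` yields the citation count. Scholar
    # also rate-limits automated requests, so repeated runs may be blocked.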
| 440 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    """A zero-shot text-classification tool built on an NLI model."""

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]
    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
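# A hedged, standalone sketch of the zero-shot idea the tool above relies on:
# score each "text / This example is {label}" pair with an NLI model and pick
# the label whose entailment logit is highest. The checkpoint name matches the
# one hard-coded above; everything else is illustrative.
if __name__ == "__main__":
    tok = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")
    model = AutoModelForSequenceClassification.from_pretrained("facebook/bart-large-mnli")
    text, labels = "I loved this film", ["positive", "negative"]
    batch = tok(
        [text] * len(labels),
        [f"This example is {label}" for label in labels],
        return_tensors="pt",
        padding=True,
    )
    with torch.no_grad():
        logits = model(**batch).logits
    print(labels[torch.argmax(logits[:, 2]).item()])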
| 699 | 0 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """A yaml loader that rejects duplicate keys."""
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}')
    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading `---`-delimited YAML block and the body."""
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    """Dataset metadata parsed from and serialized to a README's YAML block."""

    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata
    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()
    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)
    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content
    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)
    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
UpperCamelCase = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
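    # Quick round-trip illustration of the splitter above (content is made up):
    #   example = "---\nlicense: mit\ntags:\n- demo\n---\n# My dataset\n"
    #   meta, body = _split_yaml_from_readme(example)
    #   meta -> "license: mit\ntags:\n- demo"
    #   body -> "# My dataset"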
| 61 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
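# Quick illustration of the key renaming above (the key string is made up):
#   rename_key("down_blocks.0.attentions.1.proj")
#   -> "down_blocks_0.attentions_1.proj"
# i.e. "name.N" segments become "name_N" so they line up with Flax module naming.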
| 699 | 0 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCamelCase__( __lowercase , __lowercase , unittest.TestCase ):
__magic_name__ : List[Any] = IFInpaintingPipeline
__magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
__magic_name__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__magic_name__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def get_dummy_components(self):
        """simple docstring"""
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def a__( self : str )-> Union[str, Any]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def a__( self : Tuple )-> int:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def a__( self : int )-> Dict:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def a__( self : str )-> Tuple:
"""simple docstring"""
self._test_save_load_local()
def a__( self : Dict )-> Optional[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 210 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A simple vector backed by a list of float components."""
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)
def __len__( self : List[Any] ):
return len(self.__components )
def __str__( self : str ):
return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
@overload
def __mul__( self : Tuple , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Dict , UpperCamelCase__ : Vector ):
...
    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")
    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of size 'dimension'
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index 'pos' (indexing at 0)
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: x * scalar + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    # returns a random vector of size n with integer components between 'a' and 'b'
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """A simple matrix with explicit width and height."""
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
@overload
def __mul__( self : int , UpperCamelCase__ : float ):
...
@overload
def __mul__( self : Union[str, Any] , UpperCamelCase__ : Vector ):
...
    def __mul__(self, other: float | Vector) -> Matrix | Vector:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")
    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
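# Quick usage sketch of the classes above (values are illustrative):
if __name__ == "__main__":
    x = Vector([1, 2, 3])
    y = Vector([3, 2, 1])
    print(x + y)  # (4,4,4)
    print(x * y)  # 10, the dot product
    m = Matrix([[2, 0], [0, 2]], 2, 2)
    print(m.determinant())  # 4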
| 699 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]="<s>" , UpperCAmelCase_ : str="</s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Optional[Any]="<s>" , UpperCAmelCase_ : int="<unk>" , UpperCAmelCase_ : Dict="<pad>" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Tuple , ):
SCREAMING_SNAKE_CASE__ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
SCREAMING_SNAKE_CASE__ = 7
SCREAMING_SNAKE_CASE__ = [F'<madeupword{i}>' for i in range(self.num_madeup_words )]
SCREAMING_SNAKE_CASE__ = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE__ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
SCREAMING_SNAKE_CASE__ = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
SCREAMING_SNAKE_CASE__ = len(self.sp_model )
SCREAMING_SNAKE_CASE__ = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : List[Any] ):
SCREAMING_SNAKE_CASE__ = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Union[str, Any] , UpperCAmelCase_ : Any ):
SCREAMING_SNAKE_CASE__ = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
@property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
def A_ ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None ):
if not os.path.isdir(UpperCamelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE__ = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , 'wb' ) as fi:
SCREAMING_SNAKE_CASE__ = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
return (out_vocab_file,)
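# Hedged usage sketch (downloads the sentencepiece model referenced above):
#   tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   ids = tok("Hello world")["input_ids"]
#   print(tok.convert_ids_to_tokens(ids))
# Note the +1 fairseq offset documented in __init__ above: spm id 3 (",")
# surfaces to the user as id 4.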
| 472 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    """Configuration class for BlenderbotSmall models."""

    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any]=50265 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : int=8 , UpperCamelCase__ : Optional[int]=2048 , UpperCamelCase__ : Optional[Any]=16 , UpperCamelCase__ : Optional[Any]=8 , UpperCamelCase__ : List[Any]=2048 , UpperCamelCase__ : int=16 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=512 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Union[str, Any]=0.0 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Dict=0 , UpperCamelCase__ : Optional[int]=1 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Dict=2 , **UpperCamelCase__ : List[str] , ):
A = vocab_size
A = max_position_embeddings
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , forced_eos_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """ONNX export configuration for BlenderbotSmall."""
@property
def UpperCamelCase ( self : List[Any] ):
if self.task in ["default", "seq2seq-lm"]:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A = {0: 'batch'}
A = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
A = {0: 'batch', 1: 'decoder_sequence'}
A = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
else:
A = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
A = super().outputs
else:
A = super(UpperCamelCase__ , self ).outputs
if self.use_past:
A , A = self.num_layers
for i in range(UpperCamelCase__ ):
A = {0: 'batch', 2: 'past_sequence + sequence'}
A = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase ( self : int , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Generate decoder inputs
A = seq_length if not self.use_past else 1
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
A = dict(**UpperCamelCase__ , **UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
A = common_inputs['decoder_input_ids'].shape[1]
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = decoder_seq_length + 3
A = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
A = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ )] , dim=1 )
A = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
A , A = self.num_layers
A = min(UpperCamelCase__ , UpperCamelCase__ )
A = max(UpperCamelCase__ , UpperCamelCase__ ) - min_num_layers
A = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCamelCase__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
torch.zeros(UpperCamelCase__ ),
) )
# TODO: test this.
A = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCamelCase__ , UpperCamelCase__ ):
common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) )
return common_inputs
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
A , A = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
A = seqlen + 2
A , A = self.num_layers
A , A = self.num_attention_heads
A = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
A = common_inputs['attention_mask'].dtype
A = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCamelCase__ , UpperCamelCase__ , dtype=UpperCamelCase__ )] , dim=1 )
A = [
(torch.zeros(UpperCamelCase__ ), torch.zeros(UpperCamelCase__ )) for _ in range(UpperCamelCase__ )
]
return common_inputs
def UpperCamelCase ( self : List[str] , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
A = tokenizer.num_special_tokens_to_add(UpperCamelCase__ )
A = compute_effective_axis_dimension(
UpperCamelCase__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase__ )
# Generate dummy inputs according to compute batch and sequence
A = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
A = dict(tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ ) )
return common_inputs
def UpperCamelCase ( self : Any , UpperCamelCase__ : PreTrainedTokenizer , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
A = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
elif self.task == "causal-lm":
A = self._generate_dummy_inputs_for_causal_lm(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
else:
A = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
return common_inputs
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
A = super()._flatten_past_key_values_(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
A = super(UpperCamelCase__ , self )._flatten_past_key_values_(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
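# Illustrative shape note (numbers are made up, not from a real export): each
# past key/value tensor generated above has shape
# (batch, num_attention_heads, past_sequence_length, hidden_size // num_attention_heads),
# e.g. (2, 16, 3, 512 // 16) -> torch.Size([2, 16, 3, 32]).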
| 699 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    """Pipeline for class-conditional image generation with DiT."""
    def __init__(self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.'
                )
        return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True):
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
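# Minimal numeric sketch (made-up tensors, independent of the pipeline) of the
# classifier-free guidance step used in the loop above:
#   eps = eps_uncond + guidance_scale * (eps_cond - eps_uncond)
if __name__ == "__main__":
    cond_eps, uncond_eps, guidance_scale = torch.tensor(1.0), torch.tensor(0.2), 4.0
    print(uncond_eps + guidance_scale * (cond_eps - uncond_eps))  # tensor(3.4000)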
| 575 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _UpperCAmelCase ( __lowercase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE : List[str] = '''BridgeTowerImageProcessor'''
SCREAMING_SNAKE_CASE : Tuple = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] ):
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[bool, str, PaddingStrategy] = False , UpperCamelCase__ : Union[bool, str, TruncationStrategy] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 0 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : List[Any] , ):
A = self.tokenizer(
text=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , stride=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_token_type_ids=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , return_special_tokens_mask=UpperCamelCase__ , return_offsets_mapping=UpperCamelCase__ , return_length=UpperCamelCase__ , verbose=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ , )
# add pixel_values + pixel_mask
A = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_center_crop=UpperCamelCase__ , **UpperCamelCase__ )
encoding.update(UpperCamelCase__ )
return encoding
def UpperCamelCase ( self : Dict , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Any ):
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def UpperCamelCase ( self : int , *UpperCamelCase__ : int , **UpperCamelCase__ : List[str] ):
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def UpperCamelCase ( self : Any ):
A = self.tokenizer.model_input_names
A = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 699 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCAmelCase : int = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class __magic_name__ ( __lowercase ):
"""simple docstring"""
__UpperCamelCase = '''longformer'''
def __init__( self :Dict , snake_case :Union[List[int], int] = 512 , snake_case :int = 2 , snake_case :int = 1 , snake_case :int = 0 , snake_case :int = 2 , snake_case :int = 30_522 , snake_case :int = 768 , snake_case :int = 12 , snake_case :int = 12 , snake_case :int = 3_072 , snake_case :str = "gelu" , snake_case :float = 0.1 , snake_case :float = 0.1 , snake_case :int = 512 , snake_case :int = 2 , snake_case :float = 0.02 , snake_case :float = 1e-12 , snake_case :bool = False , **snake_case :Dict , ):
'''simple docstring'''
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A_ : List[str] = attention_window
A_ : Dict = sep_token_id
A_ : str = bos_token_id
A_ : Tuple = eos_token_id
A_ : List[str] = vocab_size
A_ : Dict = hidden_size
A_ : List[str] = num_hidden_layers
A_ : int = num_attention_heads
A_ : Dict = hidden_act
A_ : int = intermediate_size
A_ : Tuple = hidden_dropout_prob
A_ : List[str] = attention_probs_dropout_prob
A_ : List[Any] = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Union[str, Any] = initializer_range
A_ : Optional[int] = layer_norm_eps
A_ : List[Any] = onnx_export
class __magic_name__ ( __lowercase ):
"""simple docstring"""
def __init__( self :Tuple , snake_case :"PretrainedConfig" , snake_case :str = "default" , snake_case :"List[PatchingSpec]" = None ):
'''simple docstring'''
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
A_ : str = True
@property
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Union[str, Any] = {0: "batch", 1: "choice", 2: "sequence"}
else:
A_ : int = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("global_attention_mask", dynamic_axis),
] )
@property
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : int = super().outputs
if self.task == "default":
A_ : Tuple = {0: "batch"}
return outputs
@property
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
return 1e-4
@property
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def SCREAMING_SNAKE_CASE ( self :str , snake_case :"PreTrainedTokenizerBase" , snake_case :int = -1 , snake_case :int = -1 , snake_case :bool = False , snake_case :Optional[TensorType] = None , ):
'''simple docstring'''
A_ : Tuple = super().generate_dummy_inputs(
preprocessor=UpperCamelCase__ , batch_size=UpperCamelCase__ , seq_length=UpperCamelCase__ , is_pair=UpperCamelCase__ , framework=UpperCamelCase__ )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
A_ : List[str] = torch.zeros_like(inputs["input_ids"] )
# make every second token global
A_ : Dict = 1
return inputs
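# Minimal sketch of the dummy-input trick above, using only public torch ops;
# sizes are illustrative. The ONNX export needs a global_attention_mask, and
# the code marks every second token as global rather than leaving the mask empty.
import torch

input_ids = torch.ones(1, 8, dtype=torch.long)
global_attention_mask = torch.zeros_like(input_ids)
global_attention_mask[:, ::2] = 1  # same "make every second token global" pattern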
| 454 |
def multiplication_table(number: int, number_of_terms: int) -> str:
return "\n".join(
f'''{number} * {i} = {number * i}''' for i in range(1, number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
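# Quick sanity check of multiplication_table as defined above: two terms of
# the three-times table join into exactly two newline-separated lines.
assert multiplication_table(number=3, number_of_terms=2) == "3 * 1 = 3\n3 * 2 = 6"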
| 699 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=64 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__=[1, 16, 4, 4] , UpperCamelCase__=None , ) -> List[Any]:
'''simple docstring'''
snake_case : List[str] = parent
snake_case : Optional[int] = batch_size
snake_case : Tuple = image_size
snake_case : List[Any] = patch_size
snake_case : Optional[Any] = num_channels
snake_case : Optional[Any] = is_training
snake_case : int = use_labels
snake_case : Tuple = hidden_size
snake_case : Any = num_hidden_layers
snake_case : List[str] = num_attention_heads
snake_case : Union[str, Any] = intermediate_size
snake_case : int = hidden_act
snake_case : Optional[Any] = hidden_dropout_prob
snake_case : Optional[int] = attention_probs_dropout_prob
snake_case : Optional[int] = type_sequence_label_size
snake_case : List[Any] = initializer_range
snake_case : Optional[Any] = scope
snake_case : Union[str, Any] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
snake_case : Any = (self.image_size // 32) ** 2
snake_case : Optional[int] = num_patches + 1
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case : int = None
if self.use_labels:
snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case : int = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 16, 32],
"num_groups": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=UpperCamelCase__ , )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
snake_case : int = ViTHybridModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : Optional[int] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : str = self.type_sequence_label_size
snake_case : str = ViTHybridForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
snake_case : int = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
snake_case : Union[str, Any] = self.prepare_config_and_inputs()
snake_case ,snake_case ,snake_case : List[str] = config_and_inputs
snake_case : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __lowercase , __lowercase , unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__UpperCAmelCase : List[str] = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : List[Any] = False
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Union[str, Any] = ViTHybridModelTester(self )
snake_case : List[str] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
pass
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case ,snake_case : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : List[Any] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCamelCase ( self ) -> Dict:
'''simple docstring'''
snake_case ,snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case : int = model_class(UpperCamelCase__ )
snake_case : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case : Optional[int] = [*signature.parameters.keys()]
snake_case : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCamelCase ( self ) -> int:
'''simple docstring'''
snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def lowerCamelCase ( self ) -> str:
'''simple docstring'''
snake_case ,snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case : Any = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
snake_case : Union[str, Any] = model_class(config=UpperCamelCase__ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
snake_case : List[Any] = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def lowerCamelCase ( self ) -> List[str]:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case : Optional[int] = ViTHybridModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def __lowerCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
snake_case : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def lowerCamelCase ( self ) -> Any:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[int] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
UpperCamelCase__ )
snake_case : int = self.default_image_processor
snake_case : int = prepare_img()
snake_case : Optional[Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
snake_case : Dict = model(**UpperCamelCase__ )
# verify the logits
snake_case : List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
snake_case : Optional[int] = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
def lowerCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Union[str, Any] = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
snake_case : Tuple = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
snake_case : Any = prepare_img()
snake_case : Optional[int] = image_processor(images=UpperCamelCase__ , return_tensors="pt" )
snake_case : Any = model(**UpperCamelCase__ )
snake_case : Optional[Any] = outputs.logits
# model predicts one of the 1000 ImageNet classes
snake_case : List[Any] = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , "tabby, tabby cat" )
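# Numeric check of the seq-length comment near the top of this file: with
# image_size=64 and the backbone's default output stride of 32, the feature
# map is 2x2, giving 4 patches plus one [CLS] token.
image_size, output_stride = 64, 32
num_patches = (image_size // output_stride) ** 2
assert num_patches + 1 == 5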
| 178 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class _UpperCAmelCase ( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , UpperCamelCase__ : int = 128 , UpperCamelCase__ : int = 256 , UpperCamelCase__ : float = 2_000.0 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 2048 , UpperCamelCase__ : float = 0.1 , ):
super().__init__()
A = nn.Sequential(
nn.Linear(UpperCamelCase__ , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=UpperCamelCase__ ) , nn.SiLU() , )
A = nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A = False
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
# FiLM conditional T5 decoder
A = DecoderLayer(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
self.decoders.append(UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ )
A = nn.Dropout(p=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : int ):
A = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ):
A , A , A = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
A = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
A = self.conditioning_emb(UpperCamelCase__ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
A = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
A = torch.broadcast_to(
torch.arange(UpperCamelCase__ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
A = self.position_encoding(UpperCamelCase__ )
A = self.continuous_inputs_projection(UpperCamelCase__ )
inputs += position_encodings
A = self.dropout(UpperCamelCase__ )
# decoder: No padding present.
A = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
A = [(x, self.encoder_decoder_mask(UpperCamelCase__ , UpperCamelCase__ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
A = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
A = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
A = lyr(
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , )[0]
A = self.decoder_norm(UpperCamelCase__ )
A = self.post_dropout(UpperCamelCase__ )
A = self.spec_out(UpperCamelCase__ )
return spec_out
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]=1e-6 ):
super().__init__()
A = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=UpperCamelCase__ , d_kv=UpperCamelCase__ , num_heads=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ ) )
def UpperCamelCase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=None , ):
A = self.layer[0](
UpperCamelCase__ , conditioning_emb=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
if encoder_hidden_states is not None:
A = torch.where(encoder_attention_mask > 0 , 0 , -1e1_0 ).to(
encoder_hidden_states.dtype )
A = self.layer[1](
UpperCamelCase__ , key_value_states=UpperCamelCase__ , attention_mask=UpperCamelCase__ , )
# Apply Film Conditional Feed Forward layer
A = self.layer[-1](UpperCamelCase__ , UpperCamelCase__ )
return (hidden_states,)
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict ):
super().__init__()
A = TaLayerNorm(UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None , ):
# pre_self_attention_layer_norm
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.FiLMLayer(UpperCamelCase__ , UpperCamelCase__ )
# Self-attention block
A = self.attention(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
super().__init__()
A = Attention(query_dim=UpperCamelCase__ , heads=UpperCamelCase__ , dim_head=UpperCamelCase__ , out_bias=UpperCamelCase__ , scale_qk=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , ):
A = self.layer_norm(UpperCamelCase__ )
A = self.attention(
UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , attention_mask=attention_mask.squeeze(1 ) , )
A = hidden_states + self.dropout(UpperCamelCase__ )
return layer_output
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ):
super().__init__()
A = TaDenseGatedActDense(d_model=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ )
A = TaFiLMLayer(in_features=d_model * 4 , out_features=UpperCamelCase__ )
A = TaLayerNorm(UpperCamelCase__ , eps=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
def UpperCamelCase ( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any=None ):
A = self.layer_norm(UpperCamelCase__ )
if conditioning_emb is not None:
A = self.film(UpperCamelCase__ , UpperCamelCase__ )
A = self.DenseReluDense(UpperCamelCase__ )
A = hidden_states + self.dropout(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
A = nn.Dropout(UpperCamelCase__ )
A = NewGELUActivation()
def UpperCamelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] ):
A = self.act(self.wi_a(UpperCamelCase__ ) )
A = self.wi_a(UpperCamelCase__ )
A = hidden_gelu * hidden_linear
A = self.dropout(UpperCamelCase__ )
A = self.wo(UpperCamelCase__ )
return hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple=1e-6 ):
super().__init__()
A = nn.Parameter(torch.ones(UpperCamelCase__ ) )
A = eps
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : int ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
A = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=UpperCamelCase__ )
A = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
A = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def UpperCamelCase ( self : Any , UpperCamelCase__ : torch.Tensor ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044_715 * torch.pow(UpperCamelCase__ , 3.0 )) ))
class _UpperCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
super().__init__()
A = nn.Linear(UpperCamelCase__ , out_features * 2 , bias=UpperCamelCase__ )
def UpperCamelCase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ):
A = self.scale_bias(UpperCamelCase__ )
A , A = torch.chunk(UpperCamelCase__ , 2 , -1 )
A = x * (1 + scale) + shift
return x
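# Minimal, self-contained sketch of the FiLM rule used by TaFiLMLayer above:
# a conditioning embedding is projected to twice the feature width, split into
# (scale, shift), and applied as x * (1 + scale) + shift. Sizes illustrative.
import torch
import torch.nn as nn

d_model, cond_dim = 4, 8
scale_bias = nn.Linear(cond_dim, d_model * 2, bias=False)
conditioning = torch.randn(2, 1, cond_dim)
scale, shift = torch.chunk(scale_bias(conditioning), 2, dim=-1)
x = torch.randn(2, 10, d_model)
x = x * (1 + scale) + shift  # broadcasts over the sequence dimension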
| 699 | 0 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Union[str, Any] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : List[Any] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
for attribute in key.split('''.''' ):
lowerCamelCase_ : Optional[Any] = getattr(__UpperCAmelCase , __UpperCAmelCase )
if weight_type is not None:
lowerCamelCase_ : Optional[Any] = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
else:
lowerCamelCase_ : List[Any] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
lowerCamelCase_ : Tuple = value
elif weight_type == "weight_g":
lowerCamelCase_ : Union[str, Any] = value
elif weight_type == "weight_v":
lowerCamelCase_ : Dict = value
elif weight_type == "bias":
lowerCamelCase_ : List[Any] = value
else:
lowerCamelCase_ : List[str] = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = []
lowerCamelCase_ : int = fairseq_model.state_dict()
lowerCamelCase_ : List[Any] = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ : int = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == '''group''' , )
lowerCamelCase_ : List[str] = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase_ : Tuple = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
lowerCamelCase_ : List[str] = True
if "*" in mapped_key:
lowerCamelCase_ : List[str] = name.split(__UpperCAmelCase )[0].split('''.''' )[-2]
lowerCamelCase_ : str = mapped_key.replace('''*''' , __UpperCAmelCase )
if "weight_g" in name:
lowerCamelCase_ : List[Any] = '''weight_g'''
elif "weight_v" in name:
lowerCamelCase_ : Dict = '''weight_v'''
elif "bias" in name:
lowerCamelCase_ : Optional[int] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase_ : Any = '''weight'''
else:
lowerCamelCase_ : Optional[int] = None
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
continue
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase_ : Optional[int] = full_name.split('''conv_layers.''' )[-1]
lowerCamelCase_ : str = name.split('''.''' )
lowerCamelCase_ : int = int(items[0] )
lowerCamelCase_ : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
lowerCamelCase_ : List[str] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
lowerCamelCase_ : Any = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
lowerCamelCase_ : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
lowerCamelCase_ : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __snake_case (__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True ):
"""simple docstring"""
if config_path is not None:
lowerCamelCase_ : List[str] = UniSpeechSatConfig.from_pretrained(__UpperCAmelCase )
else:
lowerCamelCase_ : Tuple = UniSpeechSatConfig()
lowerCamelCase_ : str = ''''''
if is_finetuned:
lowerCamelCase_ : int = UniSpeechSatForCTC(__UpperCAmelCase )
else:
lowerCamelCase_ : Optional[int] = UniSpeechSatForPreTraining(__UpperCAmelCase )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
lowerCamelCase_ : Union[str, Any] = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase )
hf_wavavec.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
__lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : str = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
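# Hedged invocation sketch for the converter above. All paths are placeholder
# assumptions; the argument order follows the call in __main__:
# (checkpoint_path, pytorch_dump_folder_path, config_path, dict_path, is_finetuned).
#
# convert_unispeech_sat_checkpoint(
#     "/path/to/fairseq_checkpoint.pt",
#     "/tmp/unispeech-sat-hf",
#     None,              # config_path: falls back to a default UniSpeechSatConfig
#     "/path/to/dict",   # dict_path: used to resolve the fairseq data directory
#     True,              # is_finetuned: builds UniSpeechSatForCTC
# )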
| 501 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_UpperCAmelCase = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_UpperCAmelCase = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class _UpperCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
A = WATERMARK_BITS
A = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def UpperCamelCase ( self : Optional[int] , UpperCamelCase__ : torch.FloatTensor ):
# can't encode images that are smaller than 256
if images.shape[-1] < 256:
return images
A = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
A = [self.encoder.encode(UpperCamelCase__ , 'dwtDct' ) for image in images]
A = torch.from_numpy(np.array(UpperCamelCase__ ) ).permute(0 , 3 , 1 , 2 )
A = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
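# Value-range sketch for apply_watermark above: images arrive in [-1, 1], are
# mapped to [0, 255] via 255 * (images / 2 + 0.5) for the encoder, then mapped
# back (and clamped) with 2 * (images / 255 - 0.5).
import torch

images = torch.tensor([-1.0, 0.0, 1.0])
as_bytes = 255 * (images / 2 + 0.5)    # -> [0.0, 127.5, 255.0]
restored = 2 * (as_bytes / 255 - 0.5)  # -> [-1.0, 0.0, 1.0]
assert torch.allclose(images, restored)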
| 699 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase__ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int )-> Dict:
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase__ ( UpperCamelCase_ : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] )-> List[Any]:
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ = JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] )-> Tuple:
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = features.copy() if features else default_expected_features
A__ = (
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ = JsonDatasetReader(UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''},
] , )
def lowerCAmelCase__ ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict )-> Union[str, Any]:
A__ = tmp_path / '''cache'''
A__ = {'''col_3''': '''float64''', '''col_1''': '''string''', '''col_2''': '''int64'''}
A__ = features.copy() if features else default_expected_features
A__ = (
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ = JsonDatasetReader(UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Dict )-> Dict:
# jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A__ = {'''col_2''': '''int64''', '''col_3''': '''float64''', '''col_1''': '''string'''}
A__ = features.copy()
A__ = (
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ = tmp_path / '''cache'''
A__ = JsonDatasetReader(UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] )-> str:
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ , split=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple )-> Any:
if issubclass(UpperCamelCase_ , UpperCamelCase_ ):
A__ = jsonl_path
elif issubclass(UpperCamelCase_ , UpperCamelCase_ ):
A__ = [jsonl_path]
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_dataset(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any]=("train",) )-> str:
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
for split in splits:
A__ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase__ ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[int] )-> int:
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A__ = JsonDatasetReader({'''train''': jsonl_path} , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ ).read()
_check_json_datasetdict(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize(
'''features''' , [
None,
{'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
{'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
{'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
{'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
] , )
def lowerCAmelCase__ ( UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple )-> Any:
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = features.copy() if features else default_expected_features
A__ = (
Features({feature: Value(UpperCamelCase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
A__ = JsonDatasetReader({'''train''': jsonl_path} , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_datasetdict(UpperCamelCase_ , UpperCamelCase_ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase__ ( UpperCamelCase_ : Any , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] )-> List[Any]:
if split:
A__ = {split: jsonl_path}
else:
A__ = '''train'''
A__ = {'''train''': jsonl_path, '''test''': jsonl_path}
A__ = tmp_path / '''cache'''
A__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
A__ = JsonDatasetReader(UpperCamelCase_ , cache_dir=UpperCamelCase_ ).read()
_check_json_datasetdict(UpperCamelCase_ , UpperCamelCase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase__ ( UpperCamelCase_ : Any )-> Any:
return json.load(UpperCamelCase_ )
def lowerCAmelCase__ ( UpperCamelCase_ : Union[str, Any] )-> Union[str, Any]:
return [json.loads(UpperCamelCase_ ) for line in buffer]
class _UpperCAmelCase :
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)])
def snake_case_ ( self , a__ , a__ , a__):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__).write()
buffer.seek(0)
A__ = load_json_function(UpperCamelCase__)
assert isinstance(UpperCamelCase__ , UpperCamelCase__)
assert isinstance(exported_content[0] , UpperCamelCase__)
assert len(UpperCamelCase__) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789'''), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ , orient=UpperCamelCase__).write()
buffer.seek(0)
A__ = load_json(UpperCamelCase__)
assert isinstance(UpperCamelCase__ , UpperCamelCase__)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase__ , '''keys''') and not hasattr(exported_content[0] , '''keys''')
if len_at:
assert len(exported_content[len_at]) == 1_0
else:
assert len(UpperCamelCase__) == 1_0
@pytest.mark.parametrize('''lines, load_json_function''' , [(True, load_json_lines), (False, load_json)])
def snake_case_ ( self , a__ , a__ , a__):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ , num_proc=2).write()
buffer.seek(0)
A__ = load_json_function(UpperCamelCase__)
assert isinstance(UpperCamelCase__ , UpperCamelCase__)
assert isinstance(exported_content[0] , UpperCamelCase__)
assert len(UpperCamelCase__) == 1_0
@pytest.mark.parametrize(
'''orient, container, keys, len_at''' , [
('''records''', list, {'''tokens''', '''labels''', '''answers''', '''id'''}, None),
('''split''', dict, {'''columns''', '''data'''}, '''data'''),
('''index''', dict, set('''0123456789'''), None),
('''columns''', dict, {'''tokens''', '''labels''', '''answers''', '''id'''}, '''tokens'''),
('''values''', list, None, None),
('''table''', dict, {'''schema''', '''data'''}, '''data'''),
] , )
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , lines=UpperCamelCase__ , orient=UpperCamelCase__ , num_proc=2).write()
buffer.seek(0)
A__ = load_json(UpperCamelCase__)
assert isinstance(UpperCamelCase__ , UpperCamelCase__)
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(UpperCamelCase__ , '''keys''') and not hasattr(exported_content[0] , '''keys''')
if len_at:
assert len(exported_content[len_at]) == 1_0
else:
assert len(UpperCamelCase__) == 1_0
def snake_case_ ( self , a__):
with pytest.raises(UpperCamelCase__):
with io.BytesIO() as buffer:
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , num_proc=0)
@pytest.mark.parametrize('''compression, extension''' , [('''gzip''', '''gz'''), ('''bz2''', '''bz2'''), ('''xz''', '''xz''')])
def snake_case_ ( self , a__ , a__ , a__ , a__ , a__):
A__ = tmp_path_factory.mktemp('''data''') / F"test.json.{extension}"
A__ = str(shared_datadir / F"test_file.json.{extension}")
JsonDatasetWriter(UpperCamelCase__ , UpperCamelCase__ , compression=UpperCamelCase__).write()
with fsspec.open(UpperCamelCase__ , '''rb''' , compression='''infer''') as f:
A__ = f.read()
with fsspec.open(UpperCamelCase__ , '''rb''' , compression='''infer''') as f:
A__ = f.read()
assert exported_content == original_content
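# Minimal round-trip sketch with the same public datasets JSON IO exercised by
# the tests above; the in-memory buffer mirrors the write pattern used here.
import io
from datasets import Dataset
from datasets.io.json import JsonDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
with io.BytesIO() as buffer:
    JsonDatasetWriter(ds, buffer, lines=True).write()
    buffer.seek(0)
    written = buffer.read().decode("utf-8").splitlines()
assert len(written) == 2  # one JSON object per row in lines mode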
| 632 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
_UpperCAmelCase = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __UpperCamelCase (lowerCAmelCase : int, lowerCAmelCase : Dict, lowerCAmelCase : Optional[int], lowerCAmelCase : List[Any], lowerCAmelCase : str ) -> int:
for attribute in key.split('.' ):
A = getattr(lowerCAmelCase, lowerCAmelCase )
if weight_type is not None:
A = getattr(lowerCAmelCase, lowerCAmelCase ).shape
else:
A = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def __UpperCamelCase (lowerCAmelCase : List[str], lowerCAmelCase : Optional[int] ) -> Dict:
A = []
A = fairseq_model.state_dict()
A = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, hf_model.config.feat_extract_norm == 'group', )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'unispeech_sat.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('.' )[:-1] ) != key):
# special case since naming is very similar
continue
A = True
if "*" in mapped_key:
A = name.split(lowerCAmelCase )[0].split('.' )[-2]
A = mapped_key.replace('*', lowerCAmelCase )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "bias" in name:
A = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A = 'weight'
else:
A = None
set_recursively(lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
continue
if not is_used:
unused_weights.append(lowerCAmelCase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : str, lowerCAmelCase : Tuple, lowerCAmelCase : List[Any], lowerCAmelCase : int ) -> Dict:
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
A = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase )
@torch.no_grad()
def __UpperCamelCase (lowerCAmelCase : str, lowerCAmelCase : Dict, lowerCAmelCase : Union[str, Any]=None, lowerCAmelCase : str=None, lowerCAmelCase : List[Any]=True ) -> Union[str, Any]:
if config_path is not None:
A = UniSpeechSatConfig.from_pretrained(lowerCAmelCase )
else:
A = UniSpeechSatConfig()
A = ''
if is_finetuned:
A = UniSpeechSatForCTC(lowerCAmelCase )
else:
A = UniSpeechSatForPreTraining(lowerCAmelCase )
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
A = model[0].eval()
recursively_load_weights(lowerCAmelCase, lowerCAmelCase )
hf_wavavec.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_UpperCAmelCase = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 699 | 0 |
from __future__ import annotations

import numpy as np
import qiskit


def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
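# Usage sketch for bb84 above: with a fixed seed the simulator, and hence the
# key, is deterministic, so repeated calls agree. Assumes a qiskit install
# that still provides the Aer backend and qiskit.execute used by this file.
# key = bb84(key_len=16, seed=42)
# assert key == bb84(key_len=16, seed=42) and len(key) == 16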
| 509 |
from __future__ import annotations

from collections.abc import Iterator
from typing import Generic, TypeVar

T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
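# Quick exercise of the linked-list stack above.
stack = LinkedStack[int]()
for value in (1, 2, 3):
    stack.push(value)
assert str(stack) == "3->2->1"
assert len(stack) == 3
assert stack.pop() == 3 and stack.peek() == 2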
| 699 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"shi-labs/nat-mini-in1k-224": "https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json",
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig(BackboneConfigMixin, PretrainedConfig):
    r"""Configuration class for the NAT (Neighborhood Attention Transformer) model."""

    model_type = "nat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
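
# A minimal sketch of constructing the config above with a smaller backbone; the
# keyword names follow the restored __init__ signature, and the hyperparameter
# values here are illustrative only:
config = NatConfig(
    embed_dim=32,
    depths=[2, 2],
    num_heads=[2, 4],
    out_features=["stage1", "stage2"],
)
print(config.model_type)  # nat
print(config.num_layers)  # 2
print(config.hidden_size)  # 32 * 2 ** 1 = 64
print(config.stage_names)  # ['stem', 'stage1', 'stage2']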
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal score reachable from `node_index` at `depth` in a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
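
# To see why main() prints 65, trace the three-level tree by hand:
#   scores = [90, 23, 6, 33, 21, 65, 123, 34423], height = log2(8) = 3
#   depth 2 (maximizing): max(90, 23)=90, max(6, 33)=33, max(21, 65)=65, max(123, 34423)=34423
#   depth 1 (minimizing): min(90, 33)=33, min(65, 34423)=65
#   depth 0 (maximizing): max(33, 65)=65  ->  "Optimal value : 65"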
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoImageProcessor:
    r"""
    A generic image processor class that is instantiated as one of the library's image processor classes when
    created with the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` class method. This class
    cannot be instantiated directly with `__init__()` (it throws an error).
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
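
# Typical use of the class above (the checkpoint name is illustrative and requires
# network access; the mapping table resolves it to ViTImageProcessor):
#
#   from transformers import AutoImageProcessor
#
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=some_pil_image, return_tensors="pt")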
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
main()
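
# To see what compute_metrics does in isolation, here is a small hand-checked
# example with hypothetical logits and labels (not part of the original script):
#
#   import numpy as np
#   from transformers import EvalPrediction
#
#   logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
#   labels = np.array([1, 0, 0])
#   p = EvalPrediction(predictions=logits, label_ids=labels)
#   preds = np.argmax(p.predictions, axis=1)  # -> [1, 0, 1]
#   print({"acc": (preds == p.label_ids).mean()})  # {'acc': 0.666...}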
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcriptions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
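
# For intuition, CER is character-level edit distance divided by the reference
# length. A minimal self-contained sketch (independent of jiwer, illustrative only):
def char_error_rate(reference: str, hypothesis: str) -> float:
    """(substitutions + deletions + insertions) over the reference length."""
    m, n = len(reference), len(hypothesis)
    dp = list(range(n + 1))  # edit distances from the empty reference prefix
    for i in range(1, m + 1):
        prev, dp[0] = dp[0], i
        for j in range(1, n + 1):
            cur = dp[j]
            dp[j] = min(
                dp[j] + 1,  # deletion (reference char missing in hypothesis)
                dp[j - 1] + 1,  # insertion (extra hypothesis char)
                prev + (reference[i - 1] != hypothesis[j - 1]),  # substitution or match
            )
            prev = cur
    return dp[n] / m


print(char_error_rate("abc", "axc"))  # 1 substitution over 3 chars -> 0.333...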
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
"XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XGLMForCausalLM",
"XGLMModel",
"XGLMPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
"FlaxXGLMForCausalLM",
"FlaxXGLMModel",
"FlaxXGLMPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXGLMForCausalLM",
"TFXGLMModel",
"TFXGLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
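
# The _LazyModule pattern above defers heavy submodule imports until one of their
# exported names is actually accessed. A stripped-down illustration of the same
# idea (a toy stand-in, not the transformers implementation):
#
#   import importlib
#   import types
#
#   class LazyModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           # Map each exported name back to the submodule that defines it.
#           self._name_to_module = {
#               attr: mod for mod, attrs in import_structure.items() for attr in attrs
#           }
#
#       def __getattr__(self, attr):
#           if attr not in self._name_to_module:
#               raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
#           module = importlib.import_module(f".{self._name_to_module[attr]}", self.__name__)
#           value = getattr(module, attr)
#           setattr(self, attr, value)  # cache for subsequent lookups
#           return value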
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2
    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032
    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array(
            [-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
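
# Outside of tests, the pipeline exercised above is typically driven like this
# (checkpoint and prompt match the slow tests; the step count is illustrative):
#
#   import torch
#   from diffusers import AudioLDMPipeline
#
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm", torch_dtype=torch.float16)
#   pipe = pipe.to("cuda")
#   audio = pipe(
#       "A hammer hitting a wooden surface",
#       num_inference_steps=10,
#       audio_length_in_s=5.0,
#   ).audios[0]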
def actual_power(a: int, b: int) -> int:
    """Compute a**b for b >= 0 by recursive halving of the exponent."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Handle negative exponents: a**-b == 1 / a**b."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
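
# The recursion above evaluates actual_power twice per level, so it still performs
# O(b) multiplications. A minimal iterative sketch of exponentiation by squaring
# (O(log |b|) multiplications, same semantics assumed):
def fast_power(a: int, b: int) -> float:
    """Exponentiation by squaring."""
    if b < 0:
        return 1 / fast_power(a, -b)
    result = 1
    while b:
        if b & 1:  # multiply in the current power of a when the bit is set
            result *= a
        a *= a
        b >>= 1
    return result


assert fast_power(2, 10) == 1024
assert fast_power(-2, -3) == -0.125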