code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
lowerCAmelCase_ : Union[str, Any] = 'bart'
lowerCAmelCase_ : int = True
@st.cache(allow_output_mutation=_snake_case )
def __A ( ) -> Optional[Any]:
'''simple docstring'''
if LOAD_DENSE_INDEX:
_UpperCamelCase : List[str] = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
_UpperCamelCase : Any = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
_UpperCamelCase : List[Any] = qar_model.eval()
else:
_UpperCamelCase : Union[str, Any] = (None, None)
if MODEL_TYPE == "bart":
_UpperCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
_UpperCamelCase : Any = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
_UpperCamelCase : Tuple = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
_UpperCamelCase : int = sas_model.eval()
else:
_UpperCamelCase : List[str] = make_qa_sas_model(
model_name="t5-small" ,from_file="seq2seq_models/eli5_t5_model_1024_4.pth" ,device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_snake_case )
def __A ( ) -> Dict:
'''simple docstring'''
if LOAD_DENSE_INDEX:
_UpperCamelCase : int = faiss.StandardGpuResources()
_UpperCamelCase : Dict = datasets.load_dataset(path="wiki_snippets" ,name="wiki40b_en_100_0" )["""train"""]
_UpperCamelCase : Any = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" ,dtype="float32" ,mode="r" ,shape=(wikiaab_passages.num_rows, 1_2_8) ,)
_UpperCamelCase : Dict = faiss.IndexFlatIP(1_2_8 )
_UpperCamelCase : int = faiss.index_cpu_to_gpu(_snake_case ,1 ,_snake_case )
wikiaab_gpu_index_flat.add(_snake_case ) # TODO fix for larger GPU
else:
_UpperCamelCase : Optional[int] = (None, None)
_UpperCamelCase : Any = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_snake_case )
def __A ( ) -> Dict:
'''simple docstring'''
_UpperCamelCase : List[str] = datasets.load_dataset("eli5" ,name="LFQA_reddit" )
_UpperCamelCase : str = elia["""train_eli5"""]
_UpperCamelCase : Any = np.memmap(
"eli5_questions_reps.dat" ,dtype="float32" ,mode="r" ,shape=(elia_train.num_rows, 1_2_8) )
_UpperCamelCase : List[str] = faiss.IndexFlatIP(1_2_8 )
eli5_train_q_index.add(_snake_case )
return (elia_train, eli5_train_q_index)
lowerCAmelCase_ : Dict = load_indexes()
lowerCAmelCase_ : List[str] = load_models()
lowerCAmelCase_ : Optional[int] = load_train_data()
def __A ( UpperCAmelCase ,UpperCAmelCase=1_0 ) -> Tuple:
'''simple docstring'''
_UpperCamelCase : List[Any] = embed_questions_for_retrieval([question] ,_snake_case ,_snake_case )
_UpperCamelCase : List[str] = eli5_train_q_index.search(_snake_case ,_snake_case )
_UpperCamelCase : Tuple = [elia_train[int(_snake_case )] for i in I[0]]
return nn_examples
def __A ( UpperCAmelCase ,UpperCAmelCase="wiki40b" ,UpperCAmelCase="dense" ,UpperCAmelCase=1_0 ) -> Optional[Any]:
'''simple docstring'''
if source == "none":
_UpperCamelCase : Optional[int] = (""" <P> """.join(["" for _ in range(1_1 )] ).strip(), [])
else:
if method == "dense":
_UpperCamelCase : Dict = query_qa_dense_index(
_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case )
else:
_UpperCamelCase : str = query_es_index(
_snake_case ,_snake_case ,index_name="english_wiki40b_snippets_100w" ,n_results=_snake_case ,)
_UpperCamelCase : Union[str, Any] = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
_UpperCamelCase : Optional[int] = """question: {} context: {}""".format(_snake_case ,_snake_case )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCAmelCase : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCAmelCase : None),
} )
def __A ( UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase ,UpperCAmelCase=6_4 ,UpperCAmelCase=2_5_6 ,UpperCAmelCase=False ,UpperCAmelCase=2 ,UpperCAmelCase=0.95 ,UpperCAmelCase=0.8 ) -> Union[str, Any]:
'''simple docstring'''
with torch.no_grad():
_UpperCamelCase : Optional[Any] = qa_sas_generate(
_snake_case ,_snake_case ,_snake_case ,num_answers=1 ,num_beams=_snake_case ,min_len=_snake_case ,max_len=_snake_case ,do_sample=_snake_case ,temp=_snake_case ,top_p=_snake_case ,top_k=_snake_case ,max_input_length=1_0_2_4 ,device="cuda:0" ,)[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
lowerCAmelCase_ : Optional[Any] = '<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'
lowerCAmelCase_ : str = '\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class="img-container"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
lowerCAmelCase_ : List[Any] = '\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n'
st.sidebar.markdown(description, unsafe_allow_html=True)
lowerCAmelCase_ : List[Any] = [
'Answer the question',
'View the retrieved document only',
'View the most similar ELI5 question and answer',
'Show me everything, please!',
]
lowerCAmelCase_ : str = st.sidebar.checkbox("""Demo options""")
if demo_options:
lowerCAmelCase_ : Optional[int] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
lowerCAmelCase_ : Optional[Any] = action_list.index(action_st)
lowerCAmelCase_ : str = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
lowerCAmelCase_ : Any = show_type == 'Show full text of passages'
else:
lowerCAmelCase_ : Union[str, Any] = 3
lowerCAmelCase_ : Union[str, Any] = True
lowerCAmelCase_ : Optional[Any] = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
lowerCAmelCase_ : Any = '\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n '
st.sidebar.markdown(retriever_info)
lowerCAmelCase_ : List[str] = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
lowerCAmelCase_ : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
lowerCAmelCase_ : Optional[Any] = 'wiki40b'
lowerCAmelCase_ : Tuple = 'dense'
lowerCAmelCase_ : Tuple = 'beam'
lowerCAmelCase_ : int = 2
lowerCAmelCase_ : Tuple = 64
lowerCAmelCase_ : List[str] = 256
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : str = st.sidebar.checkbox("""Generation options""")
if generate_options:
lowerCAmelCase_ : Dict = '\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder\'s output probabilities.\n '
st.sidebar.markdown(generate_info)
lowerCAmelCase_ : Tuple = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
lowerCAmelCase_ : List[str] = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
lowerCAmelCase_ : Optional[Any] = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
lowerCAmelCase_ : str = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
lowerCAmelCase_ : Tuple = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
lowerCAmelCase_ : Tuple = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
lowerCAmelCase_ : Dict = None
# start main text
lowerCAmelCase_ : Tuple = [
'<MY QUESTION>',
'How do people make chocolate?',
'Why do we get a fever when we are sick?',
'How can different animals perceive different colors?',
'What is natural language processing?',
'What\'s the best way to treat a sunburn?',
'What exactly are vitamins ?',
'How does nuclear energy provide electricity?',
'What\'s the difference between viruses and bacteria?',
'Why are flutes classified as woodwinds when most of them are made out of metal ?',
'Why do people like drinking coffee even though it tastes so bad?',
'What happens when wine ages? How does it make the wine taste better?',
'If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?',
'How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?',
'How does New Zealand have so many large bird predators?',
]
lowerCAmelCase_ : List[Any] = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
lowerCAmelCase_ : str = st.text_input("""Enter your question here:""", """""")
else:
lowerCAmelCase_ : int = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
lowerCAmelCase_ : Optional[Any] = make_support(question, source=wiki_source, method="""dense""", n_results=10)
lowerCAmelCase_ : Union[str, Any] = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
lowerCAmelCase_ : List[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
lowerCAmelCase_ : List[Any] = support_list[:10]
lowerCAmelCase_ : str = '<P> ' + ' <P> '.join([res[-1] for res in support_list])
else:
lowerCAmelCase_ : int = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
lowerCAmelCase_ : Tuple = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
lowerCAmelCase_ : Optional[Any] = 'https://en.wikipedia.org/wiki/{}'.format(res[0].replace(""" """, """_"""))
lowerCAmelCase_ : int = res[1].strip()
if sec_titles == "":
lowerCAmelCase_ : List[str] = '[{}]({})'.format(res[0], wiki_url)
else:
lowerCAmelCase_ : List[Any] = sec_titles.split(""" & """)
lowerCAmelCase_ : List[str] = ' & '.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
lowerCAmelCase_ : int = find_nearest_training(question)
lowerCAmelCase_ : Any = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
lowerCAmelCase_ : int = [
'{}. {}'.format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
lowerCAmelCase_ : List[str] = '\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n'
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 435 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Dict = {'vocab_file': 'spiece.model'}
UpperCAmelCase__ : Tuple = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
}
}
UpperCAmelCase__ : Optional[int] = {
'google/bigbird-roberta-base': 4_0_9_6,
'google/bigbird-roberta-large': 4_0_9_6,
'google/bigbird-base-trivia-itc': 4_0_9_6,
}
class lowerCAmelCase_ (a__ ):
"""simple docstring"""
__UpperCamelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : List[str] = ['''input_ids''', '''attention_mask''']
__UpperCamelCase : List[int] = []
def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="[SEP]" , SCREAMING_SNAKE_CASE__="[MASK]" , SCREAMING_SNAKE_CASE__="[CLS]" , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else bos_token
SCREAMING_SNAKE_CASE__ : Dict = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else eos_token
SCREAMING_SNAKE_CASE__ : Tuple = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else unk_token
SCREAMING_SNAKE_CASE__ : List[str] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else pad_token
SCREAMING_SNAKE_CASE__ : Optional[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cls_token
SCREAMING_SNAKE_CASE__ : int = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else mask_token
SCREAMING_SNAKE_CASE__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : List[Any] = vocab_file
SCREAMING_SNAKE_CASE__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(SCREAMING_SNAKE_CASE__ )
@property
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
return self.sp_model.get_piece_size()
def __magic_name__ (self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : Tuple = None
return state
def __setstate__(self , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
SCREAMING_SNAKE_CASE__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(SCREAMING_SNAKE_CASE__ , out_type=SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return self.sp_model.piece_to_id(SCREAMING_SNAKE_CASE__ )
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.sp_model.IdToPiece(SCREAMING_SNAKE_CASE__ )
return token
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : List[str] = """"""
SCREAMING_SNAKE_CASE__ : Any = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ ) + token
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
else:
current_sub_tokens.append(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = False
out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE__ )
return out_string.strip()
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop("""use_source_tokenizer""" , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : str = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
sub_texts.append(SCREAMING_SNAKE_CASE__ )
else:
current_sub_text.append(SCREAMING_SNAKE_CASE__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.sub(r""" (\[(MASK|SEP)\])""" , r"""\1""" , """ """.join(SCREAMING_SNAKE_CASE__ ) )
else:
SCREAMING_SNAKE_CASE__ : Any = """""".join(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.clean_up_tokenization(SCREAMING_SNAKE_CASE__ )
return clean_text
else:
return text
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
SCREAMING_SNAKE_CASE__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(SCREAMING_SNAKE_CASE__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE__ )
elif not os.path.isfile(self.vocab_file ):
with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as fi:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE__ )
return (out_vocab_file,)
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ , token_ids_a=SCREAMING_SNAKE_CASE__ , already_has_special_tokens=SCREAMING_SNAKE_CASE__ )
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__ )) + [1]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 223 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Any = {
"""configuration_gpt_bigcode""": ["""GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTBigCodeConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Dict = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 720 | """simple docstring"""
from __future__ import annotations
def _lowerCamelCase( a , a , a ):
if len(a ) == 0:
raise ValueError("find_max() arg is an empty sequence" )
if (
left >= len(a )
or left < -len(a )
or right >= len(a )
or right < -len(a )
):
raise IndexError("list index out of range" )
if left == right:
return nums[left]
__a = (left + right) >> 1 # the middle
__a = find_max(a , a , a ) # find max in range[left, mid]
__a = find_max(a , mid + 1 , a ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 67 | 0 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def a ( UpperCamelCase_ : Union[str, Any] = "https://www.worldometers.info/coronavirus" ) -> dict:
snake_case__ =BeautifulSoup(requests.get(_lowercase ).text , 'html.parser' )
snake_case__ =soup.findAll('h1' )
snake_case__ =soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowercase , _lowercase )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
| 538 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowercase_ ( _lowercase , _lowercase=False ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = OmegaConf.load(_lowercase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowercase ) ) )
return config
def lowercase_ ( _lowercase , _lowercase=None , _lowercase=None ) -> Optional[int]:
'''simple docstring'''
if conf_path is None:
lowerCamelCase_ : int = '''./model_checkpoints/vqgan_only.yaml'''
lowerCamelCase_ : Dict = load_config(_lowercase , display=_lowercase )
lowerCamelCase_ : List[str] = VQModel(**config.model.params )
if ckpt_path is None:
lowerCamelCase_ : int = '''./model_checkpoints/vqgan_only.pt'''
lowerCamelCase_ : Union[str, Any] = torch.load(_lowercase , map_location=_lowercase )
if ".ckpt" in ckpt_path:
lowerCamelCase_ : str = sd['''state_dict''']
model.load_state_dict(_lowercase , strict=_lowercase )
model.to(_lowercase )
del sd
return model
def lowercase_ ( _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Any = model.encode(_lowercase )
print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
lowerCamelCase_ : Any = model.decode(_lowercase )
return xrec
def lowercase_ ( _lowercase , _lowercase=False ) -> Any:
'''simple docstring'''
lowerCamelCase_, lowerCamelCase_ : Any = string.rsplit('''.''' , 1 )
if reload:
lowerCamelCase_ : int = importlib.import_module(_lowercase )
importlib.reload(_lowercase )
return getattr(importlib.import_module(_lowercase , package=_lowercase ) , cls )
def lowercase_ ( _lowercase ) -> List[str]:
'''simple docstring'''
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def lowercase_ ( _lowercase , _lowercase , _lowercase=True , _lowercase=True ) -> Any:
'''simple docstring'''
lowerCamelCase_ : int = instantiate_from_config(_lowercase )
if sd is not None:
model.load_state_dict(_lowercase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
if ckpt:
lowerCamelCase_ : List[Any] = torch.load(_lowercase , map_location='''cpu''' )
lowerCamelCase_ : int = pl_sd['''global_step''']
print(F"""loaded model from global step {global_step}.""" )
else:
lowerCamelCase_ : Optional[int] = {'''state_dict''': None}
lowerCamelCase_ : str = None
lowerCamelCase_ : Any = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=_lowercase , eval_mode=_lowercase )['''model''']
return model, global_step
| 422 | 0 |
'''simple docstring'''
from math import pi
def lowercase (_A , _A ):
"""simple docstring"""
return 2 * pi * radius * (angle / 3_6_0)
if __name__ == "__main__":
print(arc_length(90, 10))
| 716 |
'''simple docstring'''
import math
from datetime import datetime, timedelta
def lowercase (_A ):
"""simple docstring"""
_lowerCAmelCase : Optional[Any] = year % 1_9
_lowerCAmelCase : Any = year % 4
_lowerCAmelCase : Optional[int] = year % 7
_lowerCAmelCase : int = math.floor(year / 1_0_0 )
_lowerCAmelCase : Dict = math.floor((1_3 + 8 * leap_day_inhibits) / 2_5 )
_lowerCAmelCase : Optional[Any] = leap_day_inhibits / 4
_lowerCAmelCase : Dict = (
1_5 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 3_0
_lowerCAmelCase : List[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
_lowerCAmelCase : Dict = (1_9 * metonic_cycle + secular_moon_shift) % 3_0
# PHM -> Paschal Full Moon
_lowerCAmelCase : Union[str, Any] = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 2_9 and days_from_phm_to_sunday == 6:
return datetime(_A , 4 , 1_9 )
elif days_to_add == 2_8 and days_from_phm_to_sunday == 6:
return datetime(_A , 4 , 1_8 )
else:
return datetime(_A , 3 , 2_2 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
for year in (19_94, 20_00, 20_10, 20_21, 20_23):
lowerCAmelCase : List[str] = """will be""" if year > datetime.now().year else """was"""
print(F'''Easter in {year} {tense} {gauss_easter(year)}''')
| 630 | 0 |
from datetime import datetime
import requests
def snake_case (UpperCAmelCase__ ) -> bytes:
UpperCamelCase_: Any = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
UpperCamelCase_: int = requests.get(base_url + url ).json()[0]['urls'][0]['src']
return requests.get(UpperCAmelCase__ ).content
if __name__ == "__main__":
A_ : Union[str, Any] = input('Enter Video/IGTV url: ').strip()
A_ : int = F'''{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'''
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(F'''Done. Video saved to disk as {file_name}.''') | 57 |
"""simple docstring"""
from collections import namedtuple
__snake_case : Optional[int] = namedtuple('from_to', 'from_ to')
__snake_case : Union[str, Any] = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.00_454, 264.172),
'cubicyard': from_to(0.76_455, 1.30_795),
'cubicfoot': from_to(0.028, 35.3_147),
'cup': from_to(0.000_236_588, 4_226.75),
}
def a_ ( __a , __a , __a ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'from_type\' value: {from_type!r} Supported values are:\n'''
+ ''', '''.join(__a ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
+ ''', '''.join(__a ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 571 | 0 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : str = logging.get_logger(__name__)
__A : Tuple = {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class UpperCAmelCase_ ( A ):
'''simple docstring'''
a__ = '''segformer'''
def __init__( self : Optional[Any] , a : Optional[int]=3 , a : int=4 , a : Tuple=[2, 2, 2, 2] , a : str=[8, 4, 2, 1] , a : Union[str, Any]=[32, 64, 160, 256] , a : Dict=[7, 3, 3, 3] , a : Optional[Any]=[4, 2, 2, 2] , a : List[str]=[1, 2, 5, 8] , a : int=[4, 4, 4, 4] , a : Any="gelu" , a : Optional[Any]=0.0 , a : Any=0.0 , a : Any=0.1 , a : Union[str, Any]=0.02 , a : int=0.1 , a : Tuple=1E-6 , a : Optional[int]=256 , a : Dict=255 , **a : Union[str, Any] , ) -> Optional[int]:
super().__init__(**a )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , a , )
SCREAMING_SNAKE_CASE = num_channels
SCREAMING_SNAKE_CASE = num_encoder_blocks
SCREAMING_SNAKE_CASE = depths
SCREAMING_SNAKE_CASE = sr_ratios
SCREAMING_SNAKE_CASE = hidden_sizes
SCREAMING_SNAKE_CASE = patch_sizes
SCREAMING_SNAKE_CASE = strides
SCREAMING_SNAKE_CASE = mlp_ratios
SCREAMING_SNAKE_CASE = num_attention_heads
SCREAMING_SNAKE_CASE = hidden_act
SCREAMING_SNAKE_CASE = hidden_dropout_prob
SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE = classifier_dropout_prob
SCREAMING_SNAKE_CASE = initializer_range
SCREAMING_SNAKE_CASE = drop_path_rate
SCREAMING_SNAKE_CASE = layer_norm_eps
SCREAMING_SNAKE_CASE = decoder_hidden_size
SCREAMING_SNAKE_CASE = kwargs.get("""reshape_last_stage""" , a )
SCREAMING_SNAKE_CASE = semantic_loss_ignore_index
class UpperCAmelCase_ ( A ):
'''simple docstring'''
a__ = version.parse('''1.11''' )
@property
def _UpperCAmelCase ( self : Dict ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def _UpperCAmelCase ( self : str ) -> float:
return 1E-4
@property
def _UpperCAmelCase ( self : Union[str, Any] ) -> int:
return 12
| 450 |
import os
import sys
import unittest
__A : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__A : List[Any] = os.path.join(git_repo_path, """src""", """diffusers""")
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self : Any ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = find_backend(""" if not is_torch_available():""" )
self.assertEqual(a , """torch""" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
SCREAMING_SNAKE_CASE = find_backend(""" if not (is_torch_available() and is_transformers_available()):""" )
self.assertEqual(a , """torch_and_transformers""" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
SCREAMING_SNAKE_CASE = find_backend(
""" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):""" )
self.assertEqual(a , """torch_and_transformers_and_onnx""" )
def _UpperCAmelCase ( self : List[str] ) -> int:
SCREAMING_SNAKE_CASE = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("""torch""" , a )
self.assertIn("""torch_and_transformers""" , a )
self.assertIn("""flax_and_transformers""" , a )
self.assertIn("""torch_and_transformers_and_onnx""" , a )
# Likewise, we can't assert on the exact content of a key
self.assertIn("""UNet2DModel""" , objects["""torch"""] )
self.assertIn("""FlaxUNet2DConditionModel""" , objects["""flax"""] )
self.assertIn("""StableDiffusionPipeline""" , objects["""torch_and_transformers"""] )
self.assertIn("""FlaxStableDiffusionPipeline""" , objects["""flax_and_transformers"""] )
self.assertIn("""LMSDiscreteScheduler""" , objects["""torch_and_scipy"""] )
self.assertIn("""OnnxStableDiffusionPipeline""" , objects["""torch_and_transformers_and_onnx"""] )
def _UpperCAmelCase ( self : Any ) -> int:
SCREAMING_SNAKE_CASE = create_dummy_object("""CONSTANT""" , """'torch'""" )
self.assertEqual(a , """\nCONSTANT = None\n""" )
SCREAMING_SNAKE_CASE = create_dummy_object("""function""" , """'torch'""" )
self.assertEqual(
a , """\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n""" )
SCREAMING_SNAKE_CASE = """
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, 'torch')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, 'torch')
"""
SCREAMING_SNAKE_CASE = create_dummy_object("""FakeClass""" , """'torch'""" )
self.assertEqual(a , a )
def _UpperCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, [\"torch\"])
"""
SCREAMING_SNAKE_CASE = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]} )
self.assertEqual(dummy_files["""torch"""] , a )
| 450 | 1 |
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=UpperCAmelCase_ ):
lowerCamelCase__ : Any =["torch", "transformers", "onnx"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class A__ ( metaclass=UpperCAmelCase_ ):
lowerCamelCase__ : Any =["torch", "transformers", "onnx"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> int:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Tuple:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class A__ ( metaclass=UpperCAmelCase_ ):
lowerCamelCase__ : List[Any] =["torch", "transformers", "onnx"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class A__ ( metaclass=UpperCAmelCase_ ):
lowerCamelCase__ : int =["torch", "transformers", "onnx"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> str:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class A__ ( metaclass=UpperCAmelCase_ ):
lowerCamelCase__ : List[Any] =["torch", "transformers", "onnx"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> List[Any]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
class A__ ( metaclass=UpperCAmelCase_ ):
lowerCamelCase__ : List[str] =["torch", "transformers", "onnx"]
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> Any:
"""simple docstring"""
requires_backends(self , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> List[str]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
@classmethod
def lowercase ( cls , *lowerCamelCase , **lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls , ['''torch''', '''transformers''', '''onnx'''] )
| 154 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
a__ : Tuple = logging.get_logger(__name__)
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> List[List[ImageInput]]:
"""simple docstring"""
if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(SCREAMING_SNAKE_CASE_ ):
return [[videos]]
raise ValueError(f"Could not make batched video from {videos}" )
class lowerCAmelCase__ ( UpperCAmelCase_ ):
'''simple docstring'''
_lowerCamelCase =["pixel_values"]
def __init__( self : int , a__ : bool = True , a__ : Dict[str, int] = None , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : bool = True , a__ : Dict[str, int] = None , a__ : bool = True , a__ : Union[int, float] = 1 / 255 , a__ : bool = True , a__ : bool = True , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , **a__ : Union[str, Any] , ):
super().__init__(**a__ )
UpperCAmelCase = size if size is not None else {'''shortest_edge''': 256}
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = offset
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __snake_case ( self : Dict , a__ : np.ndarray , a__ : Dict[str, int] , a__ : PILImageResampling = PILImageResampling.BILINEAR , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Optional[int] , ):
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(a__ , size['''shortest_edge'''] , default_to_square=a__ )
elif "height" in size and "width" in size:
UpperCAmelCase = (size['''height'''], size['''width'''])
else:
raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Union[str, Any] , a__ : np.ndarray , a__ : Dict[str, int] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
UpperCAmelCase = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ )
def __snake_case ( self : List[str] , a__ : np.ndarray , a__ : Union[int, float] , a__ : bool = True , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Dict , ):
UpperCAmelCase = image.astype(np.floataa )
if offset:
UpperCAmelCase = image - (scale / 2)
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __snake_case ( self : int , a__ : np.ndarray , a__ : Union[float, List[float]] , a__ : Union[float, List[float]] , a__ : Optional[Union[str, ChannelDimension]] = None , **a__ : Any , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __snake_case ( self : Any , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase = to_numpy_array(a__ )
if do_resize:
UpperCAmelCase = self.resize(image=a__ , size=a__ , resample=a__ )
if do_center_crop:
UpperCAmelCase = self.center_crop(a__ , size=a__ )
if do_rescale:
UpperCAmelCase = self.rescale(image=a__ , scale=a__ , offset=a__ )
if do_normalize:
UpperCAmelCase = self.normalize(image=a__ , mean=a__ , std=a__ )
UpperCAmelCase = to_channel_dimension_format(a__ , a__ )
return image
def __snake_case ( self : List[Any] , a__ : ImageInput , a__ : bool = None , a__ : Dict[str, int] = None , a__ : PILImageResampling = None , a__ : bool = None , a__ : Dict[str, int] = None , a__ : bool = None , a__ : float = None , a__ : bool = None , a__ : bool = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[float, List[float]]] = None , a__ : Optional[Union[str, TensorType]] = None , a__ : ChannelDimension = ChannelDimension.FIRST , **a__ : Any , ):
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = offset if offset is not None else self.offset
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(a__ , default_to_square=a__ )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(a__ , param_name='''crop_size''' )
if not valid_images(a__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
UpperCAmelCase = make_batched(a__ )
UpperCAmelCase = [
[
self._preprocess_image(
image=a__ , do_resize=a__ , size=a__ , resample=a__ , do_center_crop=a__ , crop_size=a__ , do_rescale=a__ , rescale_factor=a__ , offset=a__ , do_normalize=a__ , image_mean=a__ , image_std=a__ , data_format=a__ , )
for img in video
]
for video in videos
]
UpperCAmelCase = {'''pixel_values''': videos}
return BatchFeature(data=a__ , tensor_type=a__ )
| 51 | 0 |
"""simple docstring"""
def snake_case ( A__ ,A__ ,A__ ):
if principal <= 0:
raise Exception("Principal borrowed must be > 0" )
if rate_per_annum < 0:
raise Exception("Rate of interest must be >= 0" )
if years_to_repay <= 0 or not isinstance(A__ ,A__ ):
raise Exception("Years to repay must be an integer > 0" )
# Yearly rate is divided by 12 to get monthly rate
UpperCAmelCase_ : Optional[Any] = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
UpperCAmelCase_ : str = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 463 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase_ (metaclass=__A ):
__magic_name__ = ['''onnx''']
def __init__( self : List[Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Dict ) -> Dict:
requires_backends(self , ["onnx"] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *lowerCAmelCase_ : Dict , **lowerCAmelCase_ : Union[str, Any] ) -> int:
requires_backends(cls , ["onnx"] )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str , *lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : str ) -> Optional[Any]:
requires_backends(cls , ["onnx"] )
| 463 | 1 |
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( _lowercase : list[float] ) ->bool:
'''simple docstring'''
if len(_lowercase ) < 2:
raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
if any(i <= 0 for i in nums ):
raise ValueError("All values must be greater than 0" )
a : List[Any] = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Optional[Any]:
'''simple docstring'''
a : Any = []
a : List[str] = set({"(", "[", "{"} )
a : int = set({")", "]", "}"} )
a : int = {"{": "}", "[": "]", "(": ")"}
for i in range(len(_lowercase ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_lowercase ) == 0 or (len(_lowercase ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_lowercase ) == 0
def _SCREAMING_SNAKE_CASE ( ) ->Tuple:
'''simple docstring'''
a : Any = input("Enter sequence of brackets: " )
if is_balanced(_lowercase ):
print(_lowercase , "is balanced" )
else:
print(_lowercase , "is not balanced" )
if __name__ == "__main__":
main()
| 633 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def UpperCAmelCase ( A__: str , A__: str ) -> str | Literal[False]:
__lowerCamelCase : str = list(A__ )
__lowerCamelCase : Optional[Any] = list(A__ )
__lowerCamelCase : Union[str, Any] = 0
for i in range(len(A__ ) ):
if lista[i] != lista[i]:
count += 1
__lowerCamelCase : int = '_'
if count > 1:
return False
else:
return "".join(A__ )
def UpperCAmelCase ( A__: list[str] ) -> list[str]:
__lowerCamelCase : Union[str, Any] = []
while True:
__lowerCamelCase : int = ['$'] * len(A__ )
__lowerCamelCase : Optional[int] = []
for i in range(len(A__ ) ):
for j in range(i + 1 , len(A__ ) ):
__lowerCamelCase : Union[str, Any] = compare_string(binary[i] , binary[j] )
if k is False:
__lowerCamelCase : str = '*'
__lowerCamelCase : Dict = '*'
temp.append('X' )
for i in range(len(A__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(A__ ) == 0:
return pi
__lowerCamelCase : Union[str, Any] = list(set(A__ ) )
def UpperCAmelCase ( A__: int , A__: Sequence[float] ) -> list[str]:
__lowerCamelCase : Any = []
for minterm in minterms:
__lowerCamelCase : Union[str, Any] = ''
for _ in range(A__ ):
__lowerCamelCase : Tuple = str(minterm % 2 ) + string
minterm //= 2
temp.append(A__ )
return temp
def UpperCAmelCase ( A__: str , A__: str , A__: int ) -> bool:
__lowerCamelCase : Any = list(A__ )
__lowerCamelCase : int = list(A__ )
__lowerCamelCase : Optional[int] = 0
for i in range(len(A__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def UpperCAmelCase ( A__: list[list[int]] , A__: list[str] ) -> list[str]:
__lowerCamelCase : Union[str, Any] = []
__lowerCamelCase : List[Any] = [0] * len(A__ )
for i in range(len(chart[0] ) ):
__lowerCamelCase : Optional[int] = 0
__lowerCamelCase : int = -1
for j in range(len(A__ ) ):
if chart[j][i] == 1:
count += 1
__lowerCamelCase : Dict = j
if count == 1:
__lowerCamelCase : Optional[int] = 1
for i in range(len(A__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(A__ ) ):
__lowerCamelCase : Optional[int] = 0
temp.append(prime_implicants[i] )
while True:
__lowerCamelCase : Any = 0
__lowerCamelCase : Union[str, Any] = -1
__lowerCamelCase : Optional[Any] = 0
for i in range(len(A__ ) ):
__lowerCamelCase : Union[str, Any] = chart[i].count(1 )
if count_n > max_n:
__lowerCamelCase : Union[str, Any] = count_n
__lowerCamelCase : Union[str, Any] = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(A__ ) ):
__lowerCamelCase : Union[str, Any] = 0
def UpperCAmelCase ( A__: list[str] , A__: list[str] ) -> list[list[int]]:
__lowerCamelCase : List[Any] = [[0 for x in range(len(A__ ) )] for x in range(len(A__ ) )]
for i in range(len(A__ ) ):
__lowerCamelCase : Tuple = prime_implicants[i].count('_' )
for j in range(len(A__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , A__ ):
__lowerCamelCase : Any = 1
return chart
def UpperCAmelCase ( ) -> None:
__lowerCamelCase : Optional[int] = int(input('Enter the no. of variables\n' ) )
__lowerCamelCase : List[Any] = [
float(A__ )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
__lowerCamelCase : Union[str, Any] = decimal_to_binary(A__ , A__ )
__lowerCamelCase : List[Any] = check(A__ )
print('Prime Implicants are:' )
print(A__ )
__lowerCamelCase : List[str] = prime_implicant_chart(A__ , A__ )
__lowerCamelCase : List[str] = selection(A__ , A__ )
print('Essential Prime Implicants are:' )
print(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 263 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
a_ : Tuple = logging.get_logger(__name__)
# General docstring
a_ : List[str] = '''PoolFormerConfig'''
# Base docstring
a_ : Optional[Any] = '''sail/poolformer_s12'''
a_ : List[Any] = [1, 5_12, 7, 7]
# Image classification docstring
a_ : Any = '''sail/poolformer_s12'''
a_ : Optional[int] = '''tabby, tabby cat'''
a_ : Optional[Any] = [
'''sail/poolformer_s12''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def UpperCAmelCase ( A__: Optional[Any] , A__: float = 0.0 , A__: bool = False ) -> Tuple:
if drop_prob == 0.0 or not training:
return input
__lowerCamelCase : Dict = 1 - drop_prob
__lowerCamelCase : List[Any] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
__lowerCamelCase : List[Any] = keep_prob + torch.rand(A__ , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
__lowerCamelCase : Any = input.div(A__ ) * random_tensor
return output
class __lowercase( nn.Module ):
'''simple docstring'''
def __init__( self , __a = None ):
super().__init__()
__lowerCamelCase : int = drop_prob
def snake_case_ ( self , __a ):
return drop_path(__a , self.drop_prob , self.training )
def snake_case_ ( self ):
return "p={}".format(self.drop_prob )
class __lowercase( nn.Module ):
'''simple docstring'''
def __init__( self , __a , __a , __a , __a , __a , __a=None ):
super().__init__()
__lowerCamelCase : int = patch_size if isinstance(__a , collections.abc.Iterable ) else (patch_size, patch_size)
__lowerCamelCase : int = stride if isinstance(__a , collections.abc.Iterable ) else (stride, stride)
__lowerCamelCase : Optional[int] = padding if isinstance(__a , collections.abc.Iterable ) else (padding, padding)
__lowerCamelCase : Optional[Any] = nn.Convad(__a , __a , kernel_size=__a , stride=__a , padding=__a )
__lowerCamelCase : List[str] = norm_layer(__a ) if norm_layer else nn.Identity()
def snake_case_ ( self , __a ):
__lowerCamelCase : List[Any] = self.projection(__a )
__lowerCamelCase : Dict = self.norm(__a )
return embeddings
class __lowercase( nn.GroupNorm ):
'''simple docstring'''
def __init__( self , __a , **__a ):
super().__init__(1 , __a , **__a )
class __lowercase( nn.Module ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__()
__lowerCamelCase : str = nn.AvgPoolad(__a , stride=1 , padding=pool_size // 2 , count_include_pad=__a )
def snake_case_ ( self , __a ):
return self.pool(__a ) - hidden_states
class __lowercase( nn.Module ):
'''simple docstring'''
def __init__( self , __a , __a , __a , __a ):
super().__init__()
__lowerCamelCase : Any = nn.Convad(__a , __a , 1 )
__lowerCamelCase : Dict = nn.Convad(__a , __a , 1 )
__lowerCamelCase : List[Any] = PoolFormerDropPath(__a )
if isinstance(config.hidden_act , __a ):
__lowerCamelCase : List[str] = ACTaFN[config.hidden_act]
else:
__lowerCamelCase : str = config.hidden_act
def snake_case_ ( self , __a ):
__lowerCamelCase : int = self.conva(__a )
__lowerCamelCase : Dict = self.act_fn(__a )
__lowerCamelCase : List[str] = self.drop(__a )
__lowerCamelCase : int = self.conva(__a )
__lowerCamelCase : str = self.drop(__a )
return hidden_states
class __lowercase( nn.Module ):
'''simple docstring'''
def __init__( self , __a , __a , __a , __a , __a , __a ):
super().__init__()
__lowerCamelCase : Tuple = PoolFormerPooling(__a )
__lowerCamelCase : Union[str, Any] = PoolFormerOutput(__a , __a , __a , __a )
__lowerCamelCase : List[Any] = PoolFormerGroupNorm(__a )
__lowerCamelCase : List[Any] = PoolFormerGroupNorm(__a )
# Useful for training neural nets
__lowerCamelCase : Any = PoolFormerDropPath(__a ) if drop_path > 0.0 else nn.Identity()
__lowerCamelCase : Tuple = config.use_layer_scale
if config.use_layer_scale:
__lowerCamelCase : List[str] = nn.Parameter(
config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a )
__lowerCamelCase : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((__a) ) , requires_grad=__a )
def snake_case_ ( self , __a ):
if self.use_layer_scale:
__lowerCamelCase : Union[str, Any] = self.pooling(self.before_norm(__a ) )
__lowerCamelCase : Any = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
__lowerCamelCase : Optional[Any] = hidden_states + self.drop_path(__a )
__lowerCamelCase : Tuple = ()
__lowerCamelCase : Optional[Any] = self.output(self.after_norm(__a ) )
__lowerCamelCase : List[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
__lowerCamelCase : List[Any] = hidden_states + self.drop_path(__a )
__lowerCamelCase : Optional[Any] = (output,) + outputs
return outputs
else:
__lowerCamelCase : Tuple = self.drop_path(self.pooling(self.before_norm(__a ) ) )
# First residual connection
__lowerCamelCase : List[str] = pooling_output + hidden_states
__lowerCamelCase : int = ()
# Second residual connection inside the PoolFormerOutput block
__lowerCamelCase : List[str] = self.drop_path(self.output(self.after_norm(__a ) ) )
__lowerCamelCase : str = hidden_states + layer_output
__lowerCamelCase : int = (output,) + outputs
return outputs
class __lowercase( nn.Module ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__()
__lowerCamelCase : int = config
# stochastic depth decay rule
__lowerCamelCase : int = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
__lowerCamelCase : List[str] = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
__lowerCamelCase : Optional[int] = nn.ModuleList(__a )
# Transformer blocks
__lowerCamelCase : Any = []
__lowerCamelCase : int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
__lowerCamelCase : Optional[int] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
__a , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(__a ) )
__lowerCamelCase : str = nn.ModuleList(__a )
def snake_case_ ( self , __a , __a=False , __a=True ):
__lowerCamelCase : Union[str, Any] = () if output_hidden_states else None
__lowerCamelCase : int = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
__lowerCamelCase , __lowerCamelCase : Any = layers
# Get patch embeddings from hidden_states
__lowerCamelCase : Any = embedding_layer(__a )
# Send the embeddings through the blocks
for _, blk in enumerate(__a ):
__lowerCamelCase : Optional[int] = blk(__a )
__lowerCamelCase : Tuple = layer_outputs[0]
if output_hidden_states:
__lowerCamelCase : Union[str, Any] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=__a , hidden_states=__a )
class __lowercase( lowercase__ ):
'''simple docstring'''
__a : Tuple = PoolFormerConfig
__a : Tuple = 'poolformer'
__a : Optional[int] = 'pixel_values'
__a : Optional[Any] = True
def snake_case_ ( self , __a ):
if isinstance(__a , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(__a , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def snake_case_ ( self , __a , __a=False ):
if isinstance(__a , __a ):
__lowerCamelCase : Union[str, Any] = value
a_ : Union[str, Any] = R'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
a_ : List[str] = R'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
'The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.' , lowercase__ , )
class __lowercase( lowercase__ ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__(__a )
__lowerCamelCase : Optional[Any] = config
__lowerCamelCase : Any = PoolFormerEncoder(__a )
# Initialize weights and apply final processing
self.post_init()
def snake_case_ ( self ):
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case_ ( self , __a = None , __a = None , __a = None , ):
__lowerCamelCase : Union[str, Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__lowerCamelCase : Any = self.encoder(
__a , output_hidden_states=__a , return_dict=__a , )
__lowerCamelCase : int = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=__a , hidden_states=encoder_outputs.hidden_states , )
class __lowercase( nn.Module ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__()
__lowerCamelCase : Optional[Any] = nn.Linear(config.hidden_size , config.hidden_size )
def snake_case_ ( self , __a ):
__lowerCamelCase : List[Any] = self.dense(__a )
return output
@add_start_docstrings(
'\n PoolFormer Model transformer with an image classification head on top\n ' , lowercase__ , )
class __lowercase( lowercase__ ):
'''simple docstring'''
def __init__( self , __a ):
super().__init__(__a )
__lowerCamelCase : str = config.num_labels
__lowerCamelCase : Optional[Any] = PoolFormerModel(__a )
# Final norm
__lowerCamelCase : str = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
__lowerCamelCase : Optional[Any] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case_ ( self , __a = None , __a = None , __a = None , __a = None , ):
__lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase : Tuple = self.poolformer(
__a , output_hidden_states=__a , return_dict=__a , )
__lowerCamelCase : int = outputs[0]
__lowerCamelCase : Optional[int] = self.classifier(self.norm(__a ).mean([-2, -1] ) )
__lowerCamelCase : Union[str, Any] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowerCamelCase : Any = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowerCamelCase : Any = 'single_label_classification'
else:
__lowerCamelCase : Optional[Any] = 'multi_label_classification'
if self.config.problem_type == "regression":
__lowerCamelCase : int = MSELoss()
if self.num_labels == 1:
__lowerCamelCase : Optional[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowerCamelCase : Optional[Any] = loss_fct(__a , __a )
elif self.config.problem_type == "single_label_classification":
__lowerCamelCase : Tuple = CrossEntropyLoss()
__lowerCamelCase : int = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowerCamelCase : List[Any] = BCEWithLogitsLoss()
__lowerCamelCase : Optional[Any] = loss_fct(__a , __a )
if not return_dict:
__lowerCamelCase : Optional[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__a , logits=__a , hidden_states=outputs.hidden_states )
| 263 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCamelCase__ ( _A):
"""simple docstring"""
a__ : List[str] = ["image_processor", "tokenizer"]
a__ : Optional[Any] = "LayoutLMv3ImageProcessor"
a__ : Union[str, Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
def __init__( self : str , __lowerCAmelCase : Any=None , __lowerCAmelCase : Any=None , **__lowerCAmelCase : Optional[int] ) -> Any:
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowerCAmelCase , )
_A = kwargs.pop('''feature_extractor''' )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
def __call__( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , __lowerCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , __lowerCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : int , ) -> BatchEncoding:
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
_A = self.image_processor(images=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = [text] # add batch dimension (as the image processor always adds a batch dimension)
_A = features['''words''']
_A = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
# add pixel values
_A = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
_A = self.get_overflowing_images(__lowerCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
_A = images
return encoded_inputs
def snake_case_ ( self : Any , __lowerCAmelCase : List[Any] , __lowerCAmelCase : Optional[Any] ) -> int:
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
_A = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__lowerCAmelCase ) != len(__lowerCAmelCase ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(__lowerCAmelCase )} and {len(__lowerCAmelCase )}''' )
return images_with_overflow
def snake_case_ ( self : List[Any] , *__lowerCAmelCase : str , **__lowerCAmelCase : str ) -> Optional[int]:
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def snake_case_ ( self : Optional[int] , *__lowerCAmelCase : Optional[Any] , **__lowerCAmelCase : Optional[Any] ) -> str:
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
def snake_case_ ( self : List[str] ) -> Any:
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def snake_case_ ( self : Optional[Any] ) -> List[str]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __lowerCAmelCase , )
return self.image_processor_class
@property
def snake_case_ ( self : List[Any] ) -> Tuple:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __lowerCAmelCase , )
return self.image_processor
| 2 |
'''simple docstring'''
def A__ ( A : int , A : int):
'''simple docstring'''
return int((input_a, input_a).count(0) != 0)
def A__ ( ):
'''simple docstring'''
assert nand_gate(0 , 0) == 1
assert nand_gate(0 , 1) == 1
assert nand_gate(1 , 0) == 1
assert nand_gate(1 , 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 173 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __A ( unittest.TestCase ):
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _snake_case ( self ):
torch.manual_seed(0 )
lowerCamelCase =UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def _snake_case ( self ):
torch.manual_seed(0 )
lowerCamelCase =UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=10 , )
return model
@property
def _snake_case ( self ):
torch.manual_seed(0 )
lowerCamelCase =AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
lowerCamelCase =UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def _snake_case ( self ):
lowerCamelCase ="""cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase =Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowerCamelCase =DDPMScheduler()
lowerCamelCase =AudioDiffusionPipeline(vqvae=UpperCAmelCase_ , unet=self.dummy_unet , mel=UpperCAmelCase_ , scheduler=UpperCAmelCase_ )
lowerCamelCase =pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(42 )
lowerCamelCase =pipe(generator=UpperCAmelCase_ , steps=4 )
lowerCamelCase =output.audios[0]
lowerCamelCase =output.images[0]
lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(42 )
lowerCamelCase =pipe(generator=UpperCAmelCase_ , steps=4 , return_dict=UpperCAmelCase_ )
lowerCamelCase =output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCamelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCamelCase =np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:10]
lowerCamelCase =np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase =Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowerCamelCase =DDIMScheduler()
lowerCamelCase =self.dummy_vqvae_and_unet
lowerCamelCase =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=UpperCAmelCase_ , scheduler=UpperCAmelCase_ )
lowerCamelCase =pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
np.random.seed(0 )
lowerCamelCase =np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(42 )
lowerCamelCase =pipe(raw_audio=UpperCAmelCase_ , generator=UpperCAmelCase_ , start_step=5 , steps=10 )
lowerCamelCase =output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCamelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCamelCase =np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCamelCase =self.dummy_unet_condition
lowerCamelCase =AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=UpperCAmelCase_ , mel=UpperCAmelCase_ , scheduler=UpperCAmelCase_ )
lowerCamelCase =pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
np.random.seed(0 )
lowerCamelCase =torch.rand((1, 1, 10) )
lowerCamelCase =pipe(generator=UpperCAmelCase_ , encoding=UpperCAmelCase_ )
lowerCamelCase =output.images[0]
lowerCamelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCamelCase =np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
lowerCamelCase =torch_device
lowerCamelCase =DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
lowerCamelCase =pipe.to(UpperCAmelCase_ )
pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase =torch.Generator(device=UpperCAmelCase_ ).manual_seed(42 )
lowerCamelCase =pipe(generator=UpperCAmelCase_ )
lowerCamelCase =output.audios[0]
lowerCamelCase =output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCamelCase =np.frombuffer(image.tobytes() , dtype="""uint8""" )[:10]
lowerCamelCase =np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 269 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase__ : str ={'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : List[Any] =['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Any =['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Dict =[
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
UpperCAmelCase__ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 269 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
lowerCAmelCase : Optional[int] =['''small''', '''medium''', '''large''']
lowerCAmelCase : List[str] ='''lm_head.decoder.weight'''
lowerCAmelCase : Optional[Any] ='''lm_head.weight'''
def UpperCAmelCase_ ( __lowerCamelCase : str ,__lowerCamelCase : str ):
lowercase_ :Any = torch.load(__lowerCamelCase )
lowercase_ :Tuple = d.pop(__lowerCamelCase )
os.makedirs(__lowerCamelCase ,exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase ,os.path.join(__lowerCamelCase ,__lowerCamelCase ) )
if __name__ == "__main__":
lowerCAmelCase : int =argparse.ArgumentParser()
parser.add_argument('''--dialogpt_path''', default='''.''', type=str)
lowerCAmelCase : List[str] =parser.parse_args()
for MODEL in DIALOGPT_MODELS:
lowerCAmelCase : int =os.path.join(args.dialogpt_path, F'''{MODEL}_ft.pkl''')
lowerCAmelCase : List[str] =F'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 172 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase : Dict =logging.get_logger(__name__)
lowerCAmelCase : List[Any] ={
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class a_ ( _lowerCAmelCase ):
__A = "distilbert"
__A = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__( self : int , lowercase : Union[str, Any]=30_522 , lowercase : List[Any]=512 , lowercase : Tuple=False , lowercase : Dict=6 , lowercase : List[str]=12 , lowercase : Union[str, Any]=768 , lowercase : int=4 * 768 , lowercase : Union[str, Any]=0.1 , lowercase : List[str]=0.1 , lowercase : List[str]="gelu" , lowercase : Tuple=0.02 , lowercase : int=0.1 , lowercase : Any=0.2 , lowercase : List[Any]=0 , **lowercase : Optional[Any] , ):
"""simple docstring"""
lowercase_ :Optional[int] = vocab_size
lowercase_ :Optional[int] = max_position_embeddings
lowercase_ :List[Any] = sinusoidal_pos_embds
lowercase_ :Dict = n_layers
lowercase_ :List[str] = n_heads
lowercase_ :int = dim
lowercase_ :str = hidden_dim
lowercase_ :Tuple = dropout
lowercase_ :Any = attention_dropout
lowercase_ :Optional[int] = activation
lowercase_ :Dict = initializer_range
lowercase_ :int = qa_dropout
lowercase_ :Tuple = seq_classif_dropout
super().__init__(**lowercase , pad_token_id=lowercase )
class a_ ( _lowerCAmelCase ):
@property
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
if self.task == "multiple-choice":
lowercase_ :int = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase_ :Tuple = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 172 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCAmelCase ( a , unittest.TestCase ):
"""simple docstring"""
__lowercase :str = KandinskyImgaImgPipeline
__lowercase :Tuple = ["prompt", "image_embeds", "negative_image_embeds", "image"]
__lowercase :Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
__lowercase :Any = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__lowercase :Tuple = False
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
return 32
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim
@property
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return 100
@property
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
lowerCamelCase_ = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
lowerCamelCase_ = MultilingualCLIP(UpperCamelCase__ )
lowerCamelCase_ = text_encoder.eval()
return text_encoder
@property
def _lowerCAmelCase ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowerCamelCase_ = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = self.dummy_text_encoder
lowerCamelCase_ = self.dummy_tokenizer
lowerCamelCase_ = self.dummy_unet
lowerCamelCase_ = self.dummy_movq
lowerCamelCase_ = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowerCamelCase_ = DDIMScheduler(**UpperCamelCase__ )
lowerCamelCase_ = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__=0 ) -> Any:
'''simple docstring'''
lowerCamelCase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowerCamelCase_ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase__ )
# create init_image
lowerCamelCase_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((256, 256) )
if str(UpperCamelCase__ ).startswith('''mps''' ):
lowerCamelCase_ = torch.manual_seed(UpperCamelCase__ )
else:
lowerCamelCase_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
lowerCamelCase_ = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def _lowerCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = '''cpu'''
lowerCamelCase_ = self.get_dummy_components()
lowerCamelCase_ = self.pipeline_class(**UpperCamelCase__ )
lowerCamelCase_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase_ = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
lowerCamelCase_ = output.images
lowerCamelCase_ = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
lowerCamelCase_ = image[0, -3:, -3:, -1]
lowerCamelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ = np.array(
[0.61_474_943, 0.6_073_539, 0.43_308_544, 0.5_928_269, 0.47_493_595, 0.46_755_973, 0.4_613_838, 0.45_368_797, 0.50_119_233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
lowerCamelCase_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowerCamelCase_ = '''A red cartoon frog, 4k'''
lowerCamelCase_ = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
lowerCamelCase_ = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
lowerCamelCase_ = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
lowerCamelCase_ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowerCamelCase_ , lowerCamelCase_ = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowerCamelCase_ = pipeline(
UpperCamelCase__ , image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
lowerCamelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ ) | 66 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Dict:
'''simple docstring'''
lowerCamelCase_ = ''''''
lowerCamelCase_ = ''''''
lowerCamelCase_ = []
lowerCamelCase_ = 0
lowerCamelCase_ = 256
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
lowerCamelCase_ = 0
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = cva.imread(UpperCamelCase__ , 0 )
lowerCamelCase_ = copy.deepcopy(self.img )
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
lowerCamelCase_ = np.sum(UpperCamelCase__ )
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase_ = x[i] / self.k
self.sk += prk
lowerCamelCase_ = (self.L - 1) * self.sk
if self.rem != 0:
lowerCamelCase_ = int(last % last )
lowerCamelCase_ = int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(UpperCamelCase__ )
lowerCamelCase_ = int(np.ma.count(self.img ) / self.img[1].size )
lowerCamelCase_ = self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowerCamelCase_ = self.img[j][i]
if num != self.last_list[num]:
lowerCamelCase_ = self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def _lowerCAmelCase ( self ) -> str:
'''simple docstring'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5_000 )
cva.destroyAllWindows()
if __name__ == "__main__":
__lowercase : List[Any] = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
__lowercase : List[str] = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image() | 66 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_lowerCamelCase = ['gpt2']
_lowerCamelCase = 'gpt2'
if is_tf_available():
class UpperCamelCase_ ( tf.Module ):
def __init__( self :Optional[Any] , __A :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
super().__init__()
SCREAMING_SNAKE_CASE__ = tokenizer
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__A )
SCREAMING_SNAKE_CASE__ = TFGPTaLMHeadModel.from_config(__A )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text""" ),) )
def _snake_case ( self :Optional[int] , __A :int ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.tokenizer(__A )
SCREAMING_SNAKE_CASE__ = tokenized["""input_ids"""].to_tensor()
SCREAMING_SNAKE_CASE__ = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
SCREAMING_SNAKE_CASE__ = self.model(input_ids=__A , attention_mask=__A )["""logits"""]
return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :List[Any] ) -> Dict:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE__ = [GPTaTokenizer.from_pretrained(__A ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
SCREAMING_SNAKE_CASE__ = [TFGPTaTokenizer.from_pretrained(__A ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
SCREAMING_SNAKE_CASE__ = [
"""This is a straightforward English test sentence.""",
"""This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
"""Now we're going to add some Chinese: 一 二 三 一二三""",
"""And some much more rare Chinese: 齉 堃 齉堃""",
"""Je vais aussi écrire en français pour tester les accents""",
"""Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
]
SCREAMING_SNAKE_CASE__ = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _snake_case ( self :Any ) -> Any:
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
SCREAMING_SNAKE_CASE__ = tokenizer([test_inputs] , return_tensors="""tf""" )
SCREAMING_SNAKE_CASE__ = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
SCREAMING_SNAKE_CASE__ = python_outputs[key].numpy()
SCREAMING_SNAKE_CASE__ = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(__A , tf.intaa ) == tf_outputs_values ) )
@slow
def _snake_case ( self :Any ) -> int:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE__ = tf.function(__A )
for test_inputs in self.test_sentences:
SCREAMING_SNAKE_CASE__ = tf.constant(__A )
SCREAMING_SNAKE_CASE__ = compiled_tokenizer(__A )
SCREAMING_SNAKE_CASE__ = tf_tokenizer(__A )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _snake_case ( self :List[Any] ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE__ = ModelToSave(tokenizer=__A )
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE__ = model.serving(__A ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
SCREAMING_SNAKE_CASE__ = Path(__A ) / """saved.model"""
tf.saved_model.save(__A , __A , signatures={"""serving_default""": model.serving} )
SCREAMING_SNAKE_CASE__ = tf.saved_model.load(__A )
SCREAMING_SNAKE_CASE__ = loaded_model.signatures["""serving_default"""](__A )["""output_0"""]
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def _snake_case ( self :Dict ) -> int:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE__ = tf_tokenizer(__A ) # Build model with some sample inputs
SCREAMING_SNAKE_CASE__ = tf_tokenizer.get_config()
SCREAMING_SNAKE_CASE__ = TFGPTaTokenizer.from_config(__A )
SCREAMING_SNAKE_CASE__ = model_from_config(__A )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def _snake_case ( self :List[Any] ) -> Any:
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
SCREAMING_SNAKE_CASE__ = 12_3123
for max_length in [3, 5, 1024]:
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor([self.test_sentences[0]] )
SCREAMING_SNAKE_CASE__ = tf_tokenizer(__A , max_length=__A )
SCREAMING_SNAKE_CASE__ = out["""input_ids"""].numpy().shape[1]
assert out_length == max_length | 6 |
from copy import deepcopy
class __UpperCamelCase :
def __init__( self : List[str] , lowerCAmelCase : list[int] | None = None , lowerCAmelCase : int | None = None ):
'''simple docstring'''
if arr is None and size is not None:
UpperCAmelCase_ = size
UpperCAmelCase_ = [0] * size
elif arr is not None:
self.init(lowerCAmelCase )
else:
raise ValueError("Either arr or size must be specified" )
def __A ( self : Tuple , lowerCAmelCase : list[int] ):
'''simple docstring'''
UpperCAmelCase_ = len(lowerCAmelCase )
UpperCAmelCase_ = deepcopy(lowerCAmelCase )
for i in range(1 , self.size ):
UpperCAmelCase_ = self.next_(lowerCAmelCase )
if j < self.size:
self.tree[j] += self.tree[i]
def __A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
UpperCAmelCase_ = self.next_(lowerCAmelCase )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def __A ( lowerCAmelCase : int ):
'''simple docstring'''
return index + (index & (-index))
@staticmethod
def __A ( lowerCAmelCase : int ):
'''simple docstring'''
return index - (index & (-index))
def __A ( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
UpperCAmelCase_ = self.next_(lowerCAmelCase )
def __A ( self : List[str] , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
self.add(lowerCAmelCase , value - self.get(lowerCAmelCase ) )
def __A ( self : Tuple , lowerCAmelCase : int ):
'''simple docstring'''
if right == 0:
return 0
UpperCAmelCase_ = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
UpperCAmelCase_ = self.prev(lowerCAmelCase )
return result
def __A ( self : Any , lowerCAmelCase : int , lowerCAmelCase : int ):
'''simple docstring'''
return self.prefix(lowerCAmelCase ) - self.prefix(lowerCAmelCase )
def __A ( self : Any , lowerCAmelCase : int ):
'''simple docstring'''
return self.query(lowerCAmelCase , index + 1 )
def __A ( self : List[str] , lowerCAmelCase : int ):
'''simple docstring'''
value -= self.tree[0]
if value < 0:
return -1
UpperCAmelCase_ = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
UpperCAmelCase_ = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod() | 162 | 0 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__UpperCAmelCase : Union[str, Any] = logging.getLogger(__name__)
class __snake_case ( __snake_case ):
'''simple docstring'''
lowerCAmelCase__ = 'token-classification'
def __init__( self : str , A : Tuple ):
if type(A_ ) == dict:
__snake_case: Any = Namespace(**A_ )
__snake_case: Tuple = import_module("""tasks""" )
try:
__snake_case: Dict = getattr(A_ , hparams.task_type )
__snake_case: TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'''Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
__snake_case: List[Any] = self.token_classification_task.get_labels(hparams.labels )
__snake_case: Any = CrossEntropyLoss().ignore_index
super().__init__(A_ , len(self.labels ) , self.mode )
def UpperCAmelCase__ ( self : Union[str, Any] , **A : Dict ):
return self.model(**A_ )
def UpperCAmelCase__ ( self : Dict , A : Union[str, Any] , A : List[str] ):
__snake_case: Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
__snake_case: str = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
__snake_case: List[str] = self(**A_ )
__snake_case: Optional[int] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[str] = self.hparams
for mode in ["train", "dev", "test"]:
__snake_case: str = self._feature_file(A_ )
if os.path.exists(A_ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , A_ )
__snake_case: Union[str, Any] = torch.load(A_ )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
__snake_case: int = self.token_classification_task.read_examples_from_file(args.data_dir , A_ )
__snake_case: Optional[Any] = self.token_classification_task.convert_examples_to_features(
A_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=A_ , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , A_ )
torch.save(A_ , A_ )
def UpperCAmelCase__ ( self : Tuple , A : Union[str, Any] , A : Union[str, Any] , A : str = False ):
__snake_case: Optional[Any] = self._feature_file(A_ )
logger.info("""Loading features from cached file %s""" , A_ )
__snake_case: str = torch.load(A_ )
__snake_case: Optional[Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__snake_case: List[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
__snake_case: List[Any] = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
__snake_case: str = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
__snake_case: Tuple = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(A_ , A_ , A_ , A_ ) , batch_size=A_ )
def UpperCAmelCase__ ( self : Tuple , A : Any , A : Tuple ):
"""Compute validation""" ""
__snake_case: int = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
__snake_case: List[Any] = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don"t use token_type_ids
__snake_case: Optional[Any] = self(**A_ )
__snake_case: Tuple = outputs[:2]
__snake_case: Optional[Any] = logits.detach().cpu().numpy()
__snake_case: Optional[Any] = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCAmelCase__ ( self : str , A : Optional[int] ):
__snake_case: Optional[int] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
__snake_case: List[str] = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
__snake_case: List[str] = np.argmax(A_ , axis=2 )
__snake_case: Union[str, Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
__snake_case: List[str] = dict(enumerate(self.labels ) )
__snake_case: Tuple = [[] for _ in range(out_label_ids.shape[0] )]
__snake_case: List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
__snake_case: Union[str, Any] = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(A_ , A_ ),
"precision": precision_score(A_ , A_ ),
"recall": recall_score(A_ , A_ ),
"f1": fa_score(A_ , A_ ),
}
__snake_case: Tuple = dict(results.items() )
__snake_case: int = results
return ret, preds_list, out_label_list
def UpperCAmelCase__ ( self : Tuple , A : Optional[int] ):
__snake_case: str = self._eval_end(A_ )
__snake_case: Dict = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCAmelCase__ ( self : int , A : Tuple ):
__snake_case: List[str] = self._eval_end(A_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
__snake_case: Tuple = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCAmelCase__ ( A : Any , A : str ):
BaseTransformer.add_model_specific_args(A_ , A_ )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=A_ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=A_ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=A_ , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=A_ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__UpperCAmelCase : Union[str, Any] = NERTransformer.add_model_specific_args(parser, os.getcwd())
__UpperCAmelCase : Tuple = parser.parse_args()
__UpperCAmelCase : Any = NERTransformer(args)
__UpperCAmelCase : Tuple = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__UpperCAmelCase : List[Any] = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
__UpperCAmelCase : Union[str, Any] = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 716 |
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
__UpperCAmelCase : Optional[Any] = 4
__UpperCAmelCase : str = 3
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
pass
def A__ ( SCREAMING_SNAKE_CASE__) -> List[str]:
for shard in shards:
for i in range(SCREAMING_SNAKE_CASE__):
yield {"i": i, "shard": shard}
def A__ ( ) -> Optional[Any]:
__snake_case: Optional[int] = int(os.environ["""RANK"""])
__snake_case: Dict = int(os.environ["""WORLD_SIZE"""])
__snake_case: Union[str, Any] = ArgumentParser()
parser.add_argument("""--streaming""" , type=SCREAMING_SNAKE_CASE__)
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__)
parser.add_argument("""--num_workers""" , type=SCREAMING_SNAKE_CASE__ , default=0)
__snake_case: Union[str, Any] = parser.parse_args()
__snake_case: Union[str, Any] = args.streaming
__snake_case: Dict = args.num_workers
__snake_case: Optional[Any] = {"""shards""": [F'''shard_{shard_idx}''' for shard_idx in range(SCREAMING_SNAKE_CASE__)]}
__snake_case: Union[str, Any] = IterableDataset.from_generator(SCREAMING_SNAKE_CASE__ , gen_kwargs=SCREAMING_SNAKE_CASE__)
if not streaming:
__snake_case: int = Dataset.from_list(list(SCREAMING_SNAKE_CASE__))
__snake_case: List[str] = split_dataset_by_node(SCREAMING_SNAKE_CASE__ , rank=SCREAMING_SNAKE_CASE__ , world_size=SCREAMING_SNAKE_CASE__)
__snake_case: Tuple = torch.utils.data.DataLoader(SCREAMING_SNAKE_CASE__ , num_workers=SCREAMING_SNAKE_CASE__)
__snake_case: int = NUM_SHARDS * NUM_ITEMS_PER_SHARD
__snake_case: str = full_size // world_size
expected_local_size += int(rank < (full_size % world_size))
__snake_case: Tuple = sum(1 for _ in dataloader)
if local_size != expected_local_size:
raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''')
if __name__ == "__main__":
main()
| 155 | 0 |
'''simple docstring'''
class __magic_name__ :
def __init__( self : Optional[Any] ):
_a : dict[str, TrieNode] = {} # Mapping from char to TrieNode
_a : List[Any] = False
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : list[str] ):
for word in words:
self.insert(_UpperCAmelCase )
def __lowercase ( self : Any ,_UpperCAmelCase : str ):
_a : str = self
for char in word:
if char not in curr.nodes:
_a : List[Any] = TrieNode()
_a : Dict = curr.nodes[char]
_a : Optional[int] = True
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : str ):
_a : Optional[Any] = self
for char in word:
if char not in curr.nodes:
return False
_a : Optional[Any] = curr.nodes[char]
return curr.is_leaf
def __lowercase ( self : int ,_UpperCAmelCase : str ):
def _delete(_UpperCAmelCase : TrieNode ,_UpperCAmelCase : str ,_UpperCAmelCase : int ) -> bool:
if index == len(_UpperCAmelCase ):
# If word does not exist
if not curr.is_leaf:
return False
_a : str = False
return len(curr.nodes ) == 0
_a : List[str] = word[index]
_a : Any = curr.nodes.get(_UpperCAmelCase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
_a : Optional[int] = _delete(_UpperCAmelCase ,_UpperCAmelCase ,index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self ,_UpperCAmelCase ,0 )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
if node.is_leaf:
print(lowerCAmelCase_ , end=' ' )
for key, value in node.nodes.items():
print_words(lowerCAmelCase_ , word + key )
def __lowerCamelCase ( ) -> bool:
_a : Optional[Any] = 'banana bananas bandana band apple all beast'.split()
_a : str = TrieNode()
root.insert_many(lowerCAmelCase_ )
# print_words(root, "")
assert all(root.find(lowerCAmelCase_ ) for word in words )
assert root.find('banana' )
assert not root.find('bandanas' )
assert not root.find('apps' )
assert root.find('apple' )
assert root.find('all' )
root.delete('all' )
assert not root.find('all' )
root.delete('banana' )
assert not root.find('banana' )
assert root.find('bananas' )
return True
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> None:
print(str(lowerCAmelCase_ ) , 'works!' if passes else 'doesn\'t work :(' )
def __lowerCamelCase ( ) -> None:
assert test_trie()
def __lowerCamelCase ( ) -> None:
print_results('Testing trie functionality' , test_trie() )
if __name__ == "__main__":
main()
| 358 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 358 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a():
'''simple docstring'''
snake_case_ = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=lowercase__ , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=lowercase__ , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=lowercase__ )
return parser.parse_args()
def a():
'''simple docstring'''
snake_case_ = parse_args()
# Import training_script as a module.
snake_case_ = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case_ = script_fpath.stem
snake_case_ = importlib.import_module(lowercase__ )
# Patch sys.argv
snake_case_ = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 46 |
from collections import defaultdict
def a(lowercase__ , lowercase__ ):
'''simple docstring'''
snake_case_ = first_str.lower().strip()
snake_case_ = second_str.lower().strip()
# Remove whitespace
snake_case_ = first_str.replace(' ' , '' )
snake_case_ = second_str.replace(' ' , '' )
# Strings of different lengths are not anagrams
if len(lowercase__ ) != len(lowercase__ ):
return False
# Default values for count should be 0
snake_case_ = defaultdict(lowercase__ )
# For each character in input strings,
# increment count in the corresponding
for i in range(len(lowercase__ ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
A = input('Enter the first string ').strip()
A = input('Enter the second string ').strip()
A = check_anagrams(input_a, input_b)
print(f"""{input_a} and {input_b} are {"" if status else "not "}anagrams.""")
| 46 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : List[Any] = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : str = "instructblip_vision_model"
def __init__( self : Tuple , lowerCAmelCase : List[str]=14_08 , lowerCAmelCase : Union[str, Any]=61_44 , lowerCAmelCase : Optional[Any]=39 , lowerCAmelCase : Union[str, Any]=16 , lowerCAmelCase : Union[str, Any]=2_24 , lowerCAmelCase : Optional[int]=14 , lowerCAmelCase : List[Any]="gelu" , lowerCAmelCase : Optional[Any]=1E-6 , lowerCAmelCase : Optional[Any]=0.0 , lowerCAmelCase : List[Any]=1E-1_0 , lowerCAmelCase : Any=True , **lowerCAmelCase : Dict , ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
lowercase__ = hidden_size
lowercase__ = intermediate_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = patch_size
lowercase__ = image_size
lowercase__ = initializer_range
lowercase__ = attention_dropout
lowercase__ = layer_norm_eps
lowercase__ = hidden_act
lowercase__ = qkv_bias
@classmethod
def UpperCAmelCase ( cls : Optional[int] , lowerCAmelCase : Union[str, os.PathLike] , **lowerCAmelCase : Optional[int]) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase)
lowercase__, lowercase__ = cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase)
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type') == "instructblip":
lowercase__ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCAmelCase , **lowerCAmelCase)
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Dict = "instructblip_qformer"
def __init__( self : Union[str, Any] , lowerCAmelCase : str=3_05_22 , lowerCAmelCase : Tuple=7_68 , lowerCAmelCase : Optional[int]=12 , lowerCAmelCase : int=12 , lowerCAmelCase : str=30_72 , lowerCAmelCase : Optional[Any]="gelu" , lowerCAmelCase : Any=0.1 , lowerCAmelCase : Any=0.1 , lowerCAmelCase : List[str]=5_12 , lowerCAmelCase : List[Any]=0.02 , lowerCAmelCase : Any=1E-1_2 , lowerCAmelCase : Any=0 , lowerCAmelCase : Union[str, Any]="absolute" , lowerCAmelCase : Any=2 , lowerCAmelCase : str=14_08 , **lowerCAmelCase : List[Any] , ) -> str:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase , **lowerCAmelCase)
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = hidden_act
lowercase__ = intermediate_size
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = position_embedding_type
lowercase__ = cross_attention_frequency
lowercase__ = encoder_hidden_size
@classmethod
def UpperCAmelCase ( cls : List[str] , lowerCAmelCase : Union[str, os.PathLike] , **lowerCAmelCase : Any) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase)
lowercase__, lowercase__ = cls.get_config_dict(lowerCAmelCase , **lowerCAmelCase)
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('model_type') == "instructblip":
lowercase__ = config_dict['qformer_config']
if "model_type" in config_dict and hasattr(cls , 'model_type') and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''')
return cls.from_dict(lowerCAmelCase , **lowerCAmelCase)
class UpperCAmelCase__( lowerCamelCase ):
'''simple docstring'''
A : Tuple = "instructblip"
A : Optional[int] = True
def __init__( self : Optional[int] , lowerCAmelCase : Any=None , lowerCAmelCase : Optional[Any]=None , lowerCAmelCase : Dict=None , lowerCAmelCase : Union[str, Any]=32 , **lowerCAmelCase : int) -> Optional[Any]:
"""simple docstring"""
super().__init__(**lowerCAmelCase)
if vision_config is None:
lowercase__ = {}
logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
if qformer_config is None:
lowercase__ = {}
logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
if text_config is None:
lowercase__ = {}
logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
lowercase__ = InstructBlipVisionConfig(**lowerCAmelCase)
lowercase__ = InstructBlipQFormerConfig(**lowerCAmelCase)
lowercase__ = text_config['model_type'] if 'model_type' in text_config else 'opt'
lowercase__ = CONFIG_MAPPING[text_model_type](**lowerCAmelCase)
lowercase__ = self.text_config.tie_word_embeddings
lowercase__ = self.text_config.is_encoder_decoder
lowercase__ = num_query_tokens
lowercase__ = self.vision_config.hidden_size
lowercase__ = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowercase__ = 1.0
lowercase__ = 0.02
@classmethod
def UpperCAmelCase ( cls : Dict , lowerCAmelCase : InstructBlipVisionConfig , lowerCAmelCase : InstructBlipQFormerConfig , lowerCAmelCase : PretrainedConfig , **lowerCAmelCase : Tuple , ) -> List[str]:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase , )
def UpperCAmelCase ( self : Optional[Any]) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = copy.deepcopy(self.__dict__)
lowercase__ = self.vision_config.to_dict()
lowercase__ = self.qformer_config.to_dict()
lowercase__ = self.text_config.to_dict()
lowercase__ = self.__class__.model_type
return output
| 622 |
from sklearn.metrics import matthews_corrcoef
import datasets
a__ : Any = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
a__ : Any = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
a__ : str = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase ( self : List[Any]) -> int:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] , )
def UpperCAmelCase ( self : Dict , lowerCAmelCase : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any]=None) -> Any:
"""simple docstring"""
return {
"matthews_correlation": float(matthews_corrcoef(lowerCAmelCase , lowerCAmelCase , sample_weight=lowerCAmelCase)),
}
| 622 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase = {
'''configuration_convnext''': ['''CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvNextConfig''', '''ConvNextOnnxConfig''']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''ConvNextFeatureExtractor''']
lowercase = ['''ConvNextImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvNextForImageClassification''',
'''ConvNextModel''',
'''ConvNextPreTrainedModel''',
'''ConvNextBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TFConvNextForImageClassification''',
'''TFConvNextModel''',
'''TFConvNextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 717 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[str] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : str = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE : Tuple = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(self.tmpdirname , a__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(a__ , a__ )
def a_ ( self , **a__ ):
return BertTokenizer.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self , **a__ ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self , **a__ ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **a__ )
def a_ ( self ):
shutil.rmtree(self.tmpdirname )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__SCREAMING_SNAKE_CASE : List[Any] = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Optional[int] = AlignProcessor(tokenizer=a__ , image_processor=a__ )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : List[str] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=a__ )
__SCREAMING_SNAKE_CASE : Optional[int] = AlignProcessor(tokenizer=a__ , image_processor=a__ )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : List[str] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , a__ )
self.assertIsInstance(processor_fast.tokenizer , a__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , a__ )
self.assertIsInstance(processor_fast.image_processor , a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
__SCREAMING_SNAKE_CASE : Any = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = AlignProcessor(tokenizer=a__ , image_processor=a__ )
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = image_processor(a__ , return_tensors="np" )
__SCREAMING_SNAKE_CASE : Dict = processor(images=a__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
__SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : List[Any] = AlignProcessor(tokenizer=a__ , image_processor=a__ )
__SCREAMING_SNAKE_CASE : Any = "lower newer"
__SCREAMING_SNAKE_CASE : List[str] = processor(text=a__ )
__SCREAMING_SNAKE_CASE : int = tokenizer(a__ , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = AlignProcessor(tokenizer=a__ , image_processor=a__ )
__SCREAMING_SNAKE_CASE : List[str] = "lower newer"
__SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(a__ ):
processor()
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Dict = AlignProcessor(tokenizer=a__ , image_processor=a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE : Dict = processor.batch_decode(a__ )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(a__ )
self.assertListEqual(a__ , a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : str = AlignProcessor(tokenizer=a__ , image_processor=a__ )
__SCREAMING_SNAKE_CASE : Any = "lower newer"
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : List[str] = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 564 | 0 |
from __future__ import annotations
from collections.abc import Callable
__A : int = list[list[float | int]]
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Matrix:
"""simple docstring"""
_A = len(_SCREAMING_SNAKE_CASE )
_A = [[0 for _ in range(size + 1 )] for _ in range(_SCREAMING_SNAKE_CASE )]
_A = 42
_A = 42
_A = 42
_A = 42
_A = 42
_A = 42
for row in range(_SCREAMING_SNAKE_CASE ):
for col in range(_SCREAMING_SNAKE_CASE ):
_A = matrix[row][col]
_A = vector[row][0]
_A = 0
_A = 0
while row < size and col < size:
# pivoting
_A = max((abs(augmented[rowa][col] ), rowa) for rowa in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
_A, _A = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , _SCREAMING_SNAKE_CASE ):
_A = augmented[rowa][col] / augmented[row][col]
_A = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , _SCREAMING_SNAKE_CASE ):
for row in range(_SCREAMING_SNAKE_CASE ):
_A = augmented[row][col] / augmented[col][col]
for cola in range(_SCREAMING_SNAKE_CASE , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(_SCREAMING_SNAKE_CASE )
]
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> Callable[[int], int]:
"""simple docstring"""
_A = len(_SCREAMING_SNAKE_CASE )
_A = [[0 for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )]
_A = [[0] for _ in range(_SCREAMING_SNAKE_CASE )]
_A = 42
_A = 42
_A = 42
_A = 42
for x_val, y_val in enumerate(_SCREAMING_SNAKE_CASE ):
for col in range(_SCREAMING_SNAKE_CASE ):
_A = (x_val + 1) ** (size - col - 1)
_A = y_val
_A = solve(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def interpolated_func(_SCREAMING_SNAKE_CASE ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(_SCREAMING_SNAKE_CASE ) )
return interpolated_func
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE = question_function , _SCREAMING_SNAKE_CASE = 10 ) -> int:
"""simple docstring"""
_A = [func(_SCREAMING_SNAKE_CASE ) for x_val in range(1 , order + 1 )]
_A = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
_A = 0
_A = 42
_A = 42
for poly in polynomials:
_A = 1
while func(_SCREAMING_SNAKE_CASE ) == poly(_SCREAMING_SNAKE_CASE ):
x_val += 1
ret += poly(_SCREAMING_SNAKE_CASE )
return ret
if __name__ == "__main__":
print(f"{solution() = }")
| 27 |
from __future__ import annotations
from typing import Any
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : str , __snake_case : int = 6 ) -> None:
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = None
self.create_linked_list(__snake_case )
def lowerCamelCase__ ( self : Optional[int] , __snake_case : int ) -> None:
'''simple docstring'''
lowerCamelCase = Node()
lowerCamelCase = current_node
lowerCamelCase = current_node
lowerCamelCase = current_node
for _ in range(1 , __snake_case ):
lowerCamelCase = Node()
lowerCamelCase = current_node
lowerCamelCase = previous_node
lowerCamelCase = current_node
lowerCamelCase = self.front
lowerCamelCase = previous_node
def lowerCamelCase__ ( self : List[str] ) -> bool:
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def lowerCamelCase__ ( self : Tuple ) -> Any | None:
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
def lowerCamelCase__ ( self : Optional[int] , __snake_case : Any ) -> None:
'''simple docstring'''
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
lowerCamelCase = self.rear.next
if self.rear:
lowerCamelCase = data
def lowerCamelCase__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
lowerCamelCase = self.front.data
lowerCamelCase = None
return data
lowerCamelCase = self.front
lowerCamelCase = old_front.next
lowerCamelCase = old_front.data
lowerCamelCase = None
return data
def lowerCamelCase__ ( self : Optional[int] ) -> None:
'''simple docstring'''
if self.is_empty():
raise Exception('Empty Queue' )
def lowerCamelCase__ ( self : int ) -> None:
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception('Full Queue' )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : int ) -> None:
'''simple docstring'''
lowerCamelCase = None
lowerCamelCase = None
lowerCamelCase = None
if __name__ == "__main__":
import doctest
doctest.testmod()
| 246 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def __init__( self : Dict ):
"""simple docstring"""
__lowerCamelCase : Any = []
def a_ ( self : Optional[Any] , A__ : Dict , A__ : Optional[int] , A__ : Tuple , **A__ : Tuple ):
"""simple docstring"""
self.events.append("""on_init_end""" )
def a_ ( self : str , A__ : List[Any] , A__ : Tuple , A__ : Tuple , **A__ : Optional[Any] ):
"""simple docstring"""
self.events.append("""on_train_begin""" )
def a_ ( self : Tuple , A__ : List[Any] , A__ : List[str] , A__ : List[str] , **A__ : Tuple ):
"""simple docstring"""
self.events.append("""on_train_end""" )
def a_ ( self : Union[str, Any] , A__ : str , A__ : str , A__ : Optional[Any] , **A__ : Dict ):
"""simple docstring"""
self.events.append("""on_epoch_begin""" )
def a_ ( self : List[Any] , A__ : List[str] , A__ : List[Any] , A__ : Dict , **A__ : List[str] ):
"""simple docstring"""
self.events.append("""on_epoch_end""" )
def a_ ( self : int , A__ : Optional[Any] , A__ : Any , A__ : Any , **A__ : int ):
"""simple docstring"""
self.events.append("""on_step_begin""" )
def a_ ( self : Any , A__ : int , A__ : Any , A__ : Tuple , **A__ : int ):
"""simple docstring"""
self.events.append("""on_step_end""" )
def a_ ( self : str , A__ : int , A__ : Optional[Any] , A__ : str , **A__ : Optional[Any] ):
"""simple docstring"""
self.events.append("""on_evaluate""" )
def a_ ( self : str , A__ : str , A__ : Union[str, Any] , A__ : Any , **A__ : int ):
"""simple docstring"""
self.events.append("""on_predict""" )
def a_ ( self : str , A__ : str , A__ : int , A__ : List[Any] , **A__ : Dict ):
"""simple docstring"""
self.events.append("""on_save""" )
def a_ ( self : List[str] , A__ : str , A__ : List[str] , A__ : Any , **A__ : Dict ):
"""simple docstring"""
self.events.append("""on_log""" )
def a_ ( self : int , A__ : List[Any] , A__ : List[str] , A__ : Any , **A__ : Union[str, Any] ):
"""simple docstring"""
self.events.append("""on_prediction_step""" )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def a_ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase : List[Any] = tempfile.mkdtemp()
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.output_dir )
def a_ ( self : Optional[Any] , A__ : Any=0 , A__ : Any=0 , A__ : Dict=64 , A__ : Dict=64 , A__ : List[str]=None , A__ : Union[str, Any]=False , **A__ : Tuple ):
"""simple docstring"""
__lowerCamelCase : Optional[int] = RegressionDataset(length=A__ )
__lowerCamelCase : Optional[int] = RegressionDataset(length=A__ )
__lowerCamelCase : Any = RegressionModelConfig(a=A__ , b=A__ )
__lowerCamelCase : Optional[Any] = RegressionPreTrainedModel(A__ )
__lowerCamelCase : List[str] = TrainingArguments(self.output_dir , disable_tqdm=A__ , report_to=[] , **A__ )
return Trainer(
A__ , A__ , train_dataset=A__ , eval_dataset=A__ , callbacks=A__ , )
def a_ ( self : List[str] , A__ : Optional[int] , A__ : Dict ):
"""simple docstring"""
self.assertEqual(len(A__ ) , len(A__ ) )
# Order doesn't matter
__lowerCamelCase : List[Any] = sorted(A__ , key=lambda A__ : cb.__name__ if isinstance(A__ , A__ ) else cb.__class__.__name__ )
__lowerCamelCase : int = sorted(A__ , key=lambda A__ : cb.__name__ if isinstance(A__ , A__ ) else cb.__class__.__name__ )
for cba, cba in zip(A__ , A__ ):
if isinstance(A__ , A__ ) and isinstance(A__ , A__ ):
self.assertEqual(A__ , A__ )
elif isinstance(A__ , A__ ) and not isinstance(A__ , A__ ):
self.assertEqual(A__ , cba.__class__ )
elif not isinstance(A__ , A__ ) and isinstance(A__ , A__ ):
self.assertEqual(cba.__class__ , A__ )
else:
self.assertEqual(A__ , A__ )
def a_ ( self : Optional[int] , A__ : List[Any] ):
"""simple docstring"""
__lowerCamelCase : Tuple = ["""on_init_end""", """on_train_begin"""]
__lowerCamelCase : str = 0
__lowerCamelCase : List[Any] = len(trainer.get_eval_dataloader() )
__lowerCamelCase : List[Any] = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(A__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase : int = self.get_trainer()
__lowerCamelCase : List[str] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ )
# Callbacks passed at init are added to the default callbacks
__lowerCamelCase : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(A__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowerCamelCase : List[Any] = self.get_trainer(disable_tqdm=A__ )
__lowerCamelCase : int = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ )
def a_ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase : List[Any] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowerCamelCase : Optional[int] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(A__ )
expected_callbacks.remove(A__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ )
__lowerCamelCase : List[Any] = self.get_trainer()
__lowerCamelCase : Any = trainer.pop_callback(A__ )
self.assertEqual(cb.__class__ , A__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ )
trainer.add_callback(A__ )
expected_callbacks.insert(0 , A__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ )
# We can also add, pop, or remove by instance
__lowerCamelCase : Optional[int] = self.get_trainer()
__lowerCamelCase : Union[str, Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(A__ )
expected_callbacks.remove(A__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ )
__lowerCamelCase : int = self.get_trainer()
__lowerCamelCase : Dict = trainer.callback_handler.callbacks[0]
__lowerCamelCase : Dict = trainer.pop_callback(A__ )
self.assertEqual(A__ , A__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ )
trainer.add_callback(A__ )
expected_callbacks.insert(0 , A__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A__ )
def a_ ( self : str ):
"""simple docstring"""
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=A__ )
__lowerCamelCase : Dict = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowerCamelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ , self.get_expected_events(A__ ) )
# Independent log/save/eval
__lowerCamelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__lowerCamelCase : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ , self.get_expected_events(A__ ) )
__lowerCamelCase : str = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__lowerCamelCase : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ , self.get_expected_events(A__ ) )
__lowerCamelCase : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
__lowerCamelCase : List[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ , self.get_expected_events(A__ ) )
__lowerCamelCase : Any = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
__lowerCamelCase : Any = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ , self.get_expected_events(A__ ) )
# A bit of everything
__lowerCamelCase : Any = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
__lowerCamelCase : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A__ , self.get_expected_events(A__ ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
__lowerCamelCase : Union[str, Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(A__ ) in warn_mock.call_args[0][0]
| 720 |
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowercase (_lowercase, _lowercase, _lowercase ) -> Optional[Any]:
"""simple docstring"""
# Initialise PyTorch model
__lowerCamelCase : str = RemBertConfig.from_json_file(_lowercase )
print("""Building PyTorch model from configuration: {}""".format(str(_lowercase ) ) )
__lowerCamelCase : List[Any] = RemBertModel(_lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowercase, _lowercase, _lowercase )
# Save pytorch-model
print("""Save PyTorch model to {}""".format(_lowercase ) )
torch.save(model.state_dict(), _lowercase )
if __name__ == "__main__":
UpperCAmelCase__ :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase__ :List[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 483 | 0 |
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
UpperCAmelCase_ = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(3_2, (3, 3), input_shape=(6_4, 6_4, 3), activation="""relu""")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(3_2, (3, 3), activation="""relu"""))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=1_2_8, activation="""relu"""))
classifier.add(layers.Dense(units=1, activation="""sigmoid"""))
# Compiling the CNN
classifier.compile(
optimizer="""adam""", loss="""binary_crossentropy""", metrics=["""accuracy"""]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
UpperCAmelCase_ = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 2_5_5, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
UpperCAmelCase_ = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 2_5_5)
UpperCAmelCase_ = train_datagen.flow_from_directory(
"""dataset/training_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
UpperCAmelCase_ = test_datagen.flow_from_directory(
"""dataset/test_set""", target_size=(6_4, 6_4), batch_size=3_2, class_mode="""binary"""
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=3_0, validation_data=test_set
)
classifier.save("""cnn.h5""")
# Part 3 - Making new predictions
UpperCAmelCase_ = tf.keras.preprocessing.image.load_img(
"""dataset/single_prediction/image.png""", target_size=(6_4, 6_4)
)
UpperCAmelCase_ = tf.keras.preprocessing.image.img_to_array(test_image)
UpperCAmelCase_ = np.expand_dims(test_image, axis=0)
UpperCAmelCase_ = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
UpperCAmelCase_ = """Normal"""
if result[0][0] == 1:
UpperCAmelCase_ = """Abnormality detected"""
| 2 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowercase :
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BlenderbotSmallConfig
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = """gelu"""
def __init__(self , __a , __a=13 , __a=7 , __a=True , __a=False , __a=99 , __a=32 , __a=2 , __a=4 , __a=37 , __a=0.1 , __a=0.1 , __a=20 , __a=2 , __a=1 , __a=0 , ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = batch_size
UpperCAmelCase__ = seq_length
UpperCAmelCase__ = is_training
UpperCAmelCase__ = use_labels
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = eos_token_id
UpperCAmelCase__ = pad_token_id
UpperCAmelCase__ = bos_token_id
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase__ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase__ = prepare_blenderbot_small_inputs_dict(__a , __a , __a )
return config, inputs_dict
def UpperCamelCase__ (self , __a , __a ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFBlenderbotSmallModel(config=__a ).get_decoder()
UpperCAmelCase__ = inputs_dict['input_ids']
UpperCAmelCase__ = input_ids[:1, :]
UpperCAmelCase__ = inputs_dict['attention_mask'][:1, :]
UpperCAmelCase__ = inputs_dict['head_mask']
UpperCAmelCase__ = 1
# first forward pass
UpperCAmelCase__ = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
UpperCAmelCase__ , UpperCAmelCase__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase__ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase__ = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase__ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase__ = model(__a , attention_mask=__a )[0]
UpperCAmelCase__ = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase__ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase__ = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase__ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1E-3 )
def UpperCamelCase_( snake_case__: Any , snake_case__: List[str] , snake_case__: Dict , snake_case__: Any=None , snake_case__: int=None , snake_case__: int=None , snake_case__: int=None , snake_case__: Optional[int]=None , ) -> int:
if attention_mask is None:
UpperCAmelCase__ = tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase__ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__SCREAMING_SNAKE_CASE = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE = (
{
"""conversational""": TFBlenderbotSmallForConditionalGeneration,
"""feature-extraction""": TFBlenderbotSmallModel,
"""summarization""": TFBlenderbotSmallForConditionalGeneration,
"""text2text-generation""": TFBlenderbotSmallForConditionalGeneration,
"""translation""": TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase__ (self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFBlenderbotSmallModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__a )
def UpperCamelCase__ (self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ (self ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
@require_tokenizers
@require_tf
class lowercase ( unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
"""Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like """
""" i'm going to throw up.\nand why is that?"""
]
__SCREAMING_SNAKE_CASE = """facebook/blenderbot_small-90M"""
@cached_property
def UpperCamelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def UpperCamelCase__ (self ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def UpperCamelCase__ (self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.tokenizer(self.src_text , return_tensors='tf' )
UpperCAmelCase__ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__a , )
UpperCAmelCase__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__a )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 146 | 0 |
'''simple docstring'''
def __lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ):
'''simple docstring'''
return int((input_a, input_a).count(0 ) != 0 )
def __lowerCamelCase ( ):
'''simple docstring'''
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 710 | '''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowercase__ : Tuple = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ):
'''simple docstring'''
inspect_dataset(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ = path + '''.py'''
assert script_name in os.listdir(_UpperCamelCase )
assert "__pycache__" not in os.listdir(_UpperCamelCase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def __lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
inspect_metric(_UpperCamelCase , _UpperCamelCase )
UpperCAmelCase_ = path + '''.py'''
assert script_name in os.listdir(_UpperCamelCase )
assert "__pycache__" not in os.listdir(_UpperCamelCase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int ):
'''simple docstring'''
UpperCAmelCase_ = get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
with pytest.raises(_UpperCamelCase ):
get_dataset_config_info(_UpperCamelCase , config_name=_UpperCamelCase )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = get_dataset_config_names(_UpperCamelCase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : int ):
'''simple docstring'''
UpperCAmelCase_ = get_dataset_infos(_UpperCamelCase )
assert list(infos.keys() ) == expected_configs
UpperCAmelCase_ = expected_configs[0]
assert expected_config in infos
UpperCAmelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def __lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : str ):
'''simple docstring'''
UpperCAmelCase_ = get_dataset_infos(_UpperCamelCase )
assert expected_config in infos
UpperCAmelCase_ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ):
'''simple docstring'''
with pytest.raises(_UpperCamelCase ):
get_dataset_split_names(_UpperCamelCase , config_name=_UpperCamelCase )
| 43 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
) | 561 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase :Optional[int] = get_tests_dir('''fixtures''')
lowerCAmelCase :Any = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
lowerCAmelCase :Tuple = get_tests_dir('''fixtures/dummy-config.json''')
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : str = 0
def __lowerCAmelCase ( self : int ) -> Tuple:
__magic_name__ : List[str] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
__magic_name__ : List[str] = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__magic_name__ : Union[str, Any] = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__magic_name__ : Dict = AutoFeatureExtractor.from_pretrained(_A ).to_dict()
config_dict.pop('feature_extractor_type' )
__magic_name__ : int = WavaVecaFeatureExtractor(**_A )
# save in new folder
model_config.save_pretrained(_A )
config.save_pretrained(_A )
__magic_name__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_A )
# make sure private variable is not incorrectly saved
__magic_name__ : List[str] = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : int ) -> Union[str, Any]:
__magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
with self.assertRaisesRegex(
_A , 'bert-base is not a local folder and is not a valid model identifier' ):
__magic_name__ : str = AutoFeatureExtractor.from_pretrained('bert-base' )
def __lowerCAmelCase ( self : Any ) -> Tuple:
with self.assertRaisesRegex(
_A , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
__magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(_A , revision='aaaaaa' )
def __lowerCAmelCase ( self : Dict ) -> str:
with self.assertRaisesRegex(
_A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__magic_name__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_A ):
__magic_name__ : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
__magic_name__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
__magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
__magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(_A , trust_remote_code=_A )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def __lowerCAmelCase ( self : str ) -> Tuple:
try:
AutoConfig.register('custom' , _A )
AutoFeatureExtractor.register(_A , _A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoFeatureExtractor.register(_A , _A )
# Now that the config is registered, it can be used as any other config with the auto-API
__magic_name__ : str = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(_A )
__magic_name__ : List[Any] = AutoFeatureExtractor.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def __lowerCAmelCase ( self : Optional[Any] ) -> Dict:
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Any = True
try:
AutoConfig.register('custom' , _A )
AutoFeatureExtractor.register(_A , _A )
# If remote code is not set, the default is to use local
__magic_name__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__magic_name__ : str = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__magic_name__ : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=_A )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(_A , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] | 561 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _UpperCamelCase :
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=5_12 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = parent
__lowercase = 13
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 99
__lowercase = 3_84
__lowercase = 2
__lowercase = 4
__lowercase = 37
__lowercase = '''gelu'''
__lowercase = 0.1
__lowercase = 0.1
__lowercase = 5_12
__lowercase = 16
__lowercase = 2
__lowercase = 0.02
__lowercase = 3
__lowercase = 4
__lowercase = 1_28
__lowercase = 2
__lowercase = 9
__lowercase = 1
__lowercase = None
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCAmelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
__lowercase = TFConvBertModel(config=lowerCAmelCase__ )
__lowercase = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase = [input_ids, input_mask]
__lowercase = model(lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = TFConvBertForMaskedLM(config=lowerCAmelCase__ )
__lowercase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__lowercase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = TFConvBertForSequenceClassification(config=lowerCAmelCase__ )
__lowercase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__lowercase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
__lowercase = self.num_choices
__lowercase = TFConvBertForMultipleChoice(config=lowerCAmelCase__ )
__lowercase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
__lowercase = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowercase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.num_labels
__lowercase = TFConvBertForTokenClassification(config=lowerCAmelCase__ )
__lowercase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__lowercase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
__lowercase = TFConvBertForQuestionAnswering(config=lowerCAmelCase__ )
__lowercase = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__lowercase = model(lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) = config_and_inputs
__lowercase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class _UpperCamelCase ( _UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
__a : Optional[Any] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__a : Optional[int] = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__a : Union[str, Any] = False
__a : List[str] = False
__a : int = False
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = TFConvBertModelTester(self )
__lowercase = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = True
if hasattr(lowerCAmelCase__ , '''use_cache''' ):
__lowercase = True
__lowercase = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , '''key_length''' , lowerCAmelCase__ )
for model_class in self.all_model_classes:
__lowercase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
__lowercase = model_class(lowerCAmelCase__ )
__lowercase = len(model(lowerCAmelCase__ ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase__ , saved_model=lowerCAmelCase__ )
__lowercase = os.path.join(lowerCAmelCase__ , '''saved_model''' , '''1''' )
__lowercase = tf.keras.models.load_model(lowerCAmelCase__ )
__lowercase = model(lowerCAmelCase__ )
if self.is_encoder_decoder:
__lowercase = outputs['''encoder_hidden_states''']
__lowercase = outputs['''encoder_attentions''']
else:
__lowercase = outputs['''hidden_states''']
__lowercase = outputs['''attentions''']
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
__lowercase = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
self.assertIsNotNone(lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , '''decoder_seq_length''' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , '''encoder_seq_length''' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , '''key_length''' , lowerCAmelCase__ )
__lowercase = getattr(self.model_tester , '''key_length''' , lowerCAmelCase__ )
def check_decoder_attentions_output(lowerCAmelCase__ ):
__lowercase = len(lowerCAmelCase__ )
self.assertEqual(out_len % 2 , 0 )
__lowercase = outputs.decoder_attentions
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCAmelCase__ ):
__lowercase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = model_class(lowerCAmelCase__ )
__lowercase = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
__lowercase = len(lowerCAmelCase__ )
self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
check_encoder_attentions_output(lowerCAmelCase__ )
if self.is_encoder_decoder:
__lowercase = model_class(lowerCAmelCase__ )
__lowercase = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
check_decoder_attentions_output(lowerCAmelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(lowerCAmelCase__ )
__lowercase = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
check_encoder_attentions_output(lowerCAmelCase__ )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(lowerCAmelCase__ )
__lowercase = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCAmelCase__ ) )
self.assertEqual(model.config.output_hidden_states , lowerCAmelCase__ )
check_encoder_attentions_output(lowerCAmelCase__ )
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = TFConvBertModel.from_pretrained('''YituTech/conv-bert-base''' )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(lowerCAmelCase__ )[0]
__lowercase = [1, 6, 7_68]
self.assertEqual(output.shape , lowerCAmelCase__ )
__lowercase = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) | 522 | import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
__lowercase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowercase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__lowercase = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4814_5466, 0.457_8275, 0.4082_1073],
'''image_std''': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__lowercase = os.path.join(self.tmpdirname , lowerCAmelCase__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> int:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self , **lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(lowerCAmelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_tokenizer()
__lowercase = self.get_rust_tokenizer()
__lowercase = self.get_image_processor()
__lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
__lowercase = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase__ )
__lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
__lowercase = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase__ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowercase = self.get_image_processor(do_normalize=lowerCAmelCase__ , padding_value=1.0 )
__lowercase = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=lowerCAmelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(lowerCAmelCase__ , return_tensors='''np''' )
__lowercase = processor(images=lowerCAmelCase__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__lowercase = '''lower newer'''
__lowercase = processor(text=lowerCAmelCase__ )
__lowercase = tokenizer(lowerCAmelCase__ , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__lowercase = '''lower newer'''
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__ ):
processor()
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase = processor.batch_decode(lowerCAmelCase__ )
__lowercase = tokenizer.batch_decode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = AlignProcessor(tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
__lowercase = '''lower newer'''
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=lowerCAmelCase__ , images=lowerCAmelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 522 | 1 |
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase__ = logging.get_logger(__name__)
class __snake_case ( _lowercase):
snake_case__ : Dict = ["audio_values", "audio_mask"]
def __init__( self : List[str] , __lowerCAmelCase : Optional[Any]=2_0_4_8 , __lowerCAmelCase : Optional[int]=1 , __lowerCAmelCase : Dict=[1_6, 1_6] , __lowerCAmelCase : Optional[Any]=1_2_8 , __lowerCAmelCase : Optional[int]=4_4_1_0_0 , __lowerCAmelCase : Optional[Any]=8_6 , __lowerCAmelCase : Dict=2_0_4_8 , __lowerCAmelCase : Tuple=0.0 , **__lowerCAmelCase : Dict , ):
"""simple docstring"""
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : str = spectrogram_length
_lowerCamelCase : List[str] = num_channels
_lowerCamelCase : List[str] = patch_size
_lowerCamelCase : Optional[Any] = feature_size // self.patch_size[1]
_lowerCamelCase : Any = n_fft
_lowerCamelCase : int = sampling_rate // hop_length_to_sampling_rate
_lowerCamelCase : Optional[int] = sampling_rate
_lowerCamelCase : Optional[Any] = padding_value
_lowerCamelCase : str = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=__lowerCAmelCase , norm='''slaney''' , mel_scale='''slaney''' , ).T
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : np.array ):
"""simple docstring"""
_lowerCamelCase : Tuple = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
_lowerCamelCase : Union[str, Any] = log_spec[:, :-1]
_lowerCamelCase : Optional[Any] = log_spec - 20.0
_lowerCamelCase : List[Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Optional[Any] , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , **__lowerCAmelCase : List[str] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'''This feature extractor is set to support sampling rate'''
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
_lowerCamelCase : Union[str, Any] = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
_lowerCamelCase : List[str] = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowerCamelCase : str = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
_lowerCamelCase : Any = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowerCamelCase : Union[str, Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowerCamelCase : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_lowerCamelCase : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __lowerCAmelCase ):
_lowerCamelCase : Union[str, Any] = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_lowerCamelCase : List[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_lowerCamelCase : Any = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
_lowerCamelCase : str = np.array(__lowerCAmelCase ).astype(np.floataa )
# convert into correct format for padding
_lowerCamelCase : List[str] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
_lowerCamelCase : Optional[int] = np.ones([len(__lowerCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_lowerCamelCase : int = padded_audio_features * self.padding_value
for i in range(len(__lowerCAmelCase ) ):
_lowerCamelCase : List[str] = audio_features[i]
_lowerCamelCase : Optional[Any] = feature
# return as BatchFeature
if return_attention_mask:
_lowerCamelCase : Union[str, Any] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
else:
_lowerCamelCase : Any = {'''audio_values''': padded_audio_features}
_lowerCamelCase : Optional[int] = BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase )
return encoded_inputs
| 83 |
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class a :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=True , _snake_case=False , _snake_case=True , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=5_12 , _snake_case=16 , _snake_case=2 , _snake_case=0.02 , _snake_case=3 , _snake_case=4 , _snake_case=None , ):
"""simple docstring"""
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
if self.use_token_type_ids:
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , use_stable_embedding=_snake_case , )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
lowerCAmelCase = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaModel(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , )
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = OpenLlamaForCausalLM(config=_snake_case )
model.to(_snake_case )
model.eval()
# first forward pass
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , use_cache=_snake_case , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
lowerCAmelCase = model(
_snake_case , attention_mask=_snake_case , encoder_hidden_states=_snake_case , encoder_attention_mask=_snake_case , past_key_values=_snake_case , output_hidden_states=_snake_case , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-3 ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.prepare_config_and_inputs()
(
(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,(
lowerCAmelCase
) ,
) = config_and_inputs
lowerCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a ( a__ , a__ , a__ , unittest.TestCase ):
snake_case__ = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
snake_case__ = (OpenLlamaForCausalLM,) if is_torch_available() else ()
snake_case__ = (
{
'''feature-extraction''': OpenLlamaModel,
'''text-classification''': OpenLlamaForSequenceClassification,
'''text-generation''': OpenLlamaForCausalLM,
'''zero-shot''': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = False
snake_case__ = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = OpenLlamaModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase = type
self.model_tester.create_and_check_model(*_snake_case )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'single_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'multi_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(_snake_case )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = OpenLlamaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowerCAmelCase = model(_snake_case , attention_mask=_snake_case , labels=_snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('Open-Llama buffers include complex numbers, which breaks this test' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def UpperCamelCase__ ( self , _snake_case ):
"""simple docstring"""
lowerCAmelCase ,lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = ids_tensor([1, 10] , config.vocab_size )
lowerCAmelCase = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = OpenLlamaModel(_snake_case )
original_model.to(_snake_case )
original_model.eval()
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
lowerCAmelCase = original_model(_snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
lowerCAmelCase = {'type': scaling_type, 'factor': 10.0}
lowerCAmelCase = OpenLlamaModel(_snake_case )
scaled_model.to(_snake_case )
scaled_model.eval()
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
lowerCAmelCase = scaled_model(_snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(_snake_case , _snake_case , atol=1E-5 ) )
| 4 | 0 |
"""simple docstring"""
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__A : Union[str, Any] = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : str = 'vision-encoder-decoder'
__UpperCAmelCase : Union[str, Any] = True
def __init__( self : Optional[Any] , **lowercase__ : int ):
super().__init__(**lowercase__ )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
f'A configuraton of type {self.model_type} cannot be instantiated because '
f'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' )
__lowercase : int = kwargs.pop("encoder" )
__lowercase : Optional[int] = encoder_config.pop("model_type" )
__lowercase : Union[str, Any] = kwargs.pop("decoder" )
__lowercase : str = decoder_config.pop("model_type" )
__lowercase : List[str] = AutoConfig.for_model(lowercase__ , **lowercase__ )
__lowercase : Tuple = AutoConfig.for_model(lowercase__ , **lowercase__ )
__lowercase : Optional[Any] = True
@classmethod
def snake_case ( cls : Tuple , lowercase__ : PretrainedConfig , lowercase__ : PretrainedConfig , **lowercase__ : Any ):
logger.info("Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config" )
__lowercase : Any = True
__lowercase : Optional[Any] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **lowercase__ )
def snake_case ( self : Optional[Any] ):
__lowercase : Tuple = copy.deepcopy(self.__dict__ )
__lowercase : List[Any] = self.encoder.to_dict()
__lowercase : Tuple = self.decoder.to_dict()
__lowercase : Union[str, Any] = self.__class__.model_type
return output
class lowerCAmelCase__ ( UpperCAmelCase__ ):
"""simple docstring"""
__UpperCAmelCase : Any = version.parse("1.11" )
@property
def snake_case ( self : Dict ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def snake_case ( self : int ):
return 1e-4
@property
def snake_case ( self : List[Any] ):
return OrderedDict({"last_hidden_state": {0: "batch", 1: "encoder_sequence"}} )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
"""simple docstring"""
@property
def snake_case ( self : Union[str, Any] ):
__lowercase : Any = OrderedDict()
__lowercase : Optional[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
__lowercase : Dict = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
__lowercase : int = {0: """batch""", 1: """encoder_sequence"""}
return common_inputs
def snake_case ( self : List[Any] , lowercase__ : "PreTrainedTokenizerBase" , lowercase__ : int = -1 , lowercase__ : int = -1 , lowercase__ : bool = False , lowercase__ : Optional["TensorType"] = None , ):
import torch
__lowercase : Optional[Any] = OrderedDict()
__lowercase : Dict = super().generate_dummy_inputs(
lowercase__ , batch_size=lowercase__ , seq_length=lowercase__ , is_pair=lowercase__ , framework=lowercase__ )
__lowercase : int = dummy_input["""input_ids"""].shape
__lowercase : Any = (batch, encoder_sequence, self._config.encoder_hidden_size)
__lowercase : List[str] = dummy_input.pop("input_ids" )
__lowercase : Tuple = dummy_input.pop("attention_mask" )
__lowercase : Tuple = torch.zeros(lowercase__ )
return common_inputs
class lowerCAmelCase__ ( UpperCAmelCase__ ):
"""simple docstring"""
@property
def snake_case ( self : Tuple ):
pass
def snake_case ( self : str , lowercase__ : PretrainedConfig ):
return VisionEncoderDecoderEncoderOnnxConfig(lowercase__ )
def snake_case ( self : Dict , lowercase__ : PretrainedConfig , lowercase__ : PretrainedConfig , lowercase__ : str = "default" ):
__lowercase : int = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(lowercase__ , lowercase__ )
| 714 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Tuple , lowercase__ : Tuple , lowercase__ : Tuple=1_3 , lowercase__ : Optional[int]=7 , lowercase__ : List[str]=False , lowercase__ : Tuple=True , lowercase__ : int=False , lowercase__ : List[str]=False , lowercase__ : Optional[Any]=1_9 , lowercase__ : int=3_2 , lowercase__ : List[Any]=5 , lowercase__ : Optional[int]=4 , lowercase__ : Any=3_7 , lowercase__ : Tuple="gelu" , lowercase__ : int=0.1 , lowercase__ : Tuple=0.1 , lowercase__ : List[Any]=5_1_2 , lowercase__ : List[Any]=1_6 , lowercase__ : Union[str, Any]=2 , lowercase__ : List[str]=0.0_2 , lowercase__ : List[Any]=3 , lowercase__ : Any=4 , lowercase__ : Optional[Any]=None , ):
__lowercase : int = parent
__lowercase : Tuple = batch_size
__lowercase : Optional[int] = seq_length
__lowercase : str = is_training
__lowercase : List[Any] = use_input_mask
__lowercase : Any = use_token_type_ids
__lowercase : str = use_labels
__lowercase : Dict = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : int = num_attention_heads
__lowercase : Union[str, Any] = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : Union[str, Any] = hidden_dropout_prob
__lowercase : Optional[int] = attention_probs_dropout_prob
__lowercase : Optional[Any] = max_position_embeddings
__lowercase : List[str] = type_vocab_size
__lowercase : str = type_sequence_label_size
__lowercase : List[str] = initializer_range
__lowercase : Optional[Any] = num_labels
__lowercase : Tuple = num_choices
__lowercase : Optional[Any] = scope
def snake_case ( self : Optional[int] ):
__lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Union[str, Any] = None
if self.use_input_mask:
__lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Dict = None
__lowercase : Any = None
__lowercase : Optional[Any] = None
if self.use_labels:
__lowercase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case ( self : Any ):
__lowercase : Dict = EsmConfig(
vocab_size=3_3 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=lowercase__ , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
return config
def snake_case ( self : str , lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : List[Any] , lowercase__ : int , lowercase__ : List[Any] , lowercase__ : str ):
__lowercase : List[Any] = EsmForProteinFolding(config=lowercase__ ).float()
model.to(lowercase__ )
model.eval()
__lowercase : List[str] = model(lowercase__ , attention_mask=lowercase__ )
__lowercase : Any = model(lowercase__ )
__lowercase : Any = model(lowercase__ )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 1_4, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def snake_case ( self : Tuple ):
__lowercase : List[str] = self.prepare_config_and_inputs()
(
(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,(
__lowercase
) ,
) : Any = config_and_inputs
__lowercase : int = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : List[Any] = (EsmForProteinFolding,) if is_torch_available() else ()
__UpperCAmelCase : int = ()
__UpperCAmelCase : Tuple = {} if is_torch_available() else {}
__UpperCAmelCase : Optional[Any] = False
def snake_case ( self : Tuple ):
__lowercase : Tuple = EsmFoldModelTester(self )
__lowercase : Dict = ConfigTester(self , config_class=lowercase__ , hidden_size=3_7 )
def snake_case ( self : Dict ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[int] ):
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
@unittest.skip("Does not support attention outputs" )
def snake_case ( self : Union[str, Any] ):
pass
@unittest.skip
def snake_case ( self : Tuple ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def snake_case ( self : Optional[int] ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def snake_case ( self : List[str] ):
pass
@unittest.skip("ESMFold does not support passing input embeds!" )
def snake_case ( self : int ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case ( self : List[Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case ( self : Any ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case ( self : List[str] ):
pass
@unittest.skip("ESMFold does not support head pruning." )
def snake_case ( self : List[str] ):
pass
@unittest.skip("ESMFold does not output hidden states in the normal way." )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip("ESMfold does not output hidden states in the normal way." )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip("ESMFold only has one output format." )
def snake_case ( self : Tuple ):
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality" )
def snake_case ( self : Any ):
pass
@unittest.skip("ESMFold does not support input chunking." )
def snake_case ( self : str ):
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments." )
def snake_case ( self : Any ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def snake_case ( self : Any ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def snake_case ( self : Tuple ):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation." )
def snake_case ( self : List[str] ):
pass
@unittest.skip("ESMFold doesn't support data parallel." )
def snake_case ( self : Optional[Any] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def snake_case ( self : Any ):
pass
@require_torch
class lowerCAmelCase__ ( lowerCAmelCase_ ):
"""simple docstring"""
@slow
def snake_case ( self : Union[str, Any] ):
__lowercase : Tuple = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1" ).float()
model.eval()
__lowercase : Optional[int] = torch.tensor([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]] )
__lowercase : str = model(lowercase__ )["positions"]
__lowercase : Union[str, Any] = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , lowercase__ , atol=1e-4 ) )
| 281 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(lowercase )} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
_snake_case : str = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
def __a ( self :Tuple ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class lowerCAmelCase_ :
"""simple docstring"""
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
_snake_case : Optional[str] = field(default=lowercase , metadata={"""help""": """The input training data file (a text file)."""} )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """An optional input train ref data file for whole word masking in Chinese."""} , )
_snake_case : Optional[str] = field(
default=lowercase , metadata={"""help""": """An optional input validation ref data file for whole word masking in Chinese."""} , )
_snake_case : bool = field(
default=lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
_snake_case : Optional[int] = field(
default=5 , metadata={
"""help""": """The percentage of the train set used as validation set in case there's no validation split"""
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated. Default to the max input length of the model."""
)
} , )
_snake_case : Optional[int] = field(
default=lowercase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
_snake_case : float = field(
default=0.15 , metadata={"""help""": """Ratio of tokens to mask for masked language modeling loss"""} )
_snake_case : bool = field(
default=lowercase , metadata={
"""help""": (
"""Whether to pad all samples to `max_seq_length`. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch."""
)
} , )
def __a ( self :Dict ):
if self.train_file is not None:
UpperCamelCase__ :Optional[Any] = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
UpperCamelCase__ :Optional[int] = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def A ( lowercase__ : Optional[Any] , lowercase__ : str ) -> List[Any]:
with open(lowercase__ , """r""" , encoding="""utf-8""" ) as f:
UpperCamelCase__ :Dict = [json.loads(lowercase__ ) for line in f.read().splitlines() if (len(lowercase__ ) > 0 and not line.isspace())]
assert len(lowercase__ ) == len(lowercase__ )
UpperCamelCase__ :int = {c: dataset[c] for c in dataset.column_names}
UpperCamelCase__ :List[Any] = refs
return Dataset.from_dict(lowercase__ )
def A ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCamelCase__ :Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
UpperCamelCase__ :int = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ :Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowercase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
UpperCamelCase__ :List[str] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
UpperCamelCase__ :Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""train[:{data_args.validation_split_percentage}%]""" , )
UpperCamelCase__ :Dict = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f"""train[{data_args.validation_split_percentage}%:]""" , )
else:
UpperCamelCase__ :Union[str, Any] = {}
if data_args.train_file is not None:
UpperCamelCase__ :List[Any] = data_args.train_file
if data_args.validation_file is not None:
UpperCamelCase__ :str = data_args.validation_file
UpperCamelCase__ :Tuple = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
UpperCamelCase__ :List[str] = """text"""
UpperCamelCase__ :Optional[int] = load_dataset(lowercase__ , data_files=lowercase__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCamelCase__ :Union[str, Any] = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
UpperCamelCase__ :List[str] = AutoConfig.from_pretrained(model_args.config_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
UpperCamelCase__ :Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
UpperCamelCase__ :Union[str, Any] = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
UpperCamelCase__ :Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowercase__ )
elif model_args.model_name_or_path:
UpperCamelCase__ :Any = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowercase__ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
UpperCamelCase__ :Tuple = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
UpperCamelCase__ :Optional[Any] = AutoModelForMaskedLM.from_config(lowercase__ )
model.resize_token_embeddings(len(lowercase__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
UpperCamelCase__ :Dict = datasets["""train"""].column_names
else:
UpperCamelCase__ :str = datasets["""validation"""].column_names
UpperCamelCase__ :Optional[int] = """text""" if """text""" in column_names else column_names[0]
UpperCamelCase__ :str = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(lowercase__ : str ):
# Remove empty lines
UpperCamelCase__ :List[str] = [line for line in examples["""text"""] if len(lowercase__ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=lowercase__ , truncation=lowercase__ , max_length=data_args.max_seq_length )
UpperCamelCase__ :int = datasets.map(
lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
UpperCamelCase__ :Tuple = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
UpperCamelCase__ :Tuple = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
UpperCamelCase__ :Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
UpperCamelCase__ :List[str] = False
# Data collator
# This one will take care of randomly masking the tokens.
UpperCamelCase__ :str = DataCollatorForWholeWordMask(tokenizer=lowercase__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCamelCase__ :Union[str, Any] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
UpperCamelCase__ :List[Any] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
UpperCamelCase__ :int = model_args.model_name_or_path
else:
UpperCamelCase__ :Optional[Any] = None
UpperCamelCase__ :List[Any] = trainer.train(resume_from_checkpoint=lowercase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCamelCase__ :int = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
UpperCamelCase__ :Optional[Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCamelCase__ :str = trainer.evaluate()
UpperCamelCase__ :Dict = math.exp(eval_output["""eval_loss"""] )
UpperCamelCase__ :int = perplexity
UpperCamelCase__ :Union[str, Any] = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
return results
def A ( lowercase__ : Tuple ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 45 |
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if digit_amount > 0:
return round(number - int(__UpperCamelCase ) , __UpperCamelCase )
return number - int(__UpperCamelCase )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 76 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
_snake_case = features.copy() if features else default_expected_features
_snake_case = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case = text_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
_snake_case = [text_path]
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=("train",) ):
'''simple docstring'''
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
_snake_case = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_snake_case = TextDatasetReader({"train": text_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
_snake_case = tmp_path / "cache"
# CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
_snake_case = {"text": "string"}
_snake_case = features.copy() if features else default_expected_features
_snake_case = (
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
_snake_case = TextDatasetReader({"train": text_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def snake_case_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
if split:
_snake_case = {split: text_path}
else:
_snake_case = "train"
_snake_case = {"train": text_path, "test": text_path}
_snake_case = tmp_path / "cache"
_snake_case = {"text": "string"}
_snake_case = TextDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 705 |
'''simple docstring'''
import math
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase ):
_snake_case = 0.0
_snake_case = 0.0
for i in range(len(lowerCamelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def UpperCamelCase( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
for i in range(len(lowerCamelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def snake_case_ ( ):
'''simple docstring'''
_snake_case = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_snake_case = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_snake_case = SelfOrganizingMap()
_snake_case = 3
_snake_case = 0.5
for _ in range(SCREAMING_SNAKE_CASE__ ):
for j in range(len(SCREAMING_SNAKE_CASE__ ) ):
# training sample
_snake_case = training_samples[j]
# Compute the winning vector
_snake_case = self_organizing_map.get_winner(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Update the winning vector
_snake_case = self_organizing_map.update(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# classify test sample
_snake_case = [0, 0, 0, 1]
_snake_case = self_organizing_map.get_winner(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# results
print(f'''Clusters that the test sample belongs to : {winner}''' )
print(f'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 368 | 0 |
"""simple docstring"""
def __UpperCamelCase ( snake_case__ ):
if p < 2:
raise ValueError("""p should not be less than 2!""" )
elif p == 2:
return True
A_ : Dict = 4
A_ : int = (1 << p) - 1
for _ in range(p - 2 ):
A_ : Union[str, Any] = ((s * s) - 2) % m
return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
| 180 |
"""simple docstring"""
import math
import os
import sys
def __UpperCamelCase ( snake_case__ ):
A_ : Optional[Any] = """"""
try:
with open(snake_case__ , """rb""" ) as binary_file:
A_ : Union[str, Any] = binary_file.read()
for dat in data:
A_ : Dict = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def __UpperCamelCase ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lexicon.pop(snake_case__ )
A_ : List[str] = last_match_id
if math.loga(snake_case__ ).is_integer():
for curr_key in lexicon:
A_ : Dict = """0""" + lexicon[curr_key]
A_ : int = bin(snake_case__ )[2:]
def __UpperCamelCase ( snake_case__ ):
A_ : Dict = {"""0""": """0""", """1""": """1"""}
A_ , A_ : Optional[int] = """""", """"""
A_ : Tuple = len(snake_case__ )
for i in range(len(snake_case__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
A_ : List[str] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
index += 1
A_ : int = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
A_ : Any = lexicon[curr_string]
result += last_match_id
return result
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : Optional[int] = os.path.getsize(snake_case__ )
A_ : Dict = bin(snake_case__ )[2:]
A_ : Optional[Any] = len(snake_case__ )
return "0" * (length_length - 1) + file_length_binary + compressed
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : Tuple = 8
try:
with open(snake_case__ , """wb""" ) as opened_file:
A_ : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case__ ) , snake_case__ )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case__ , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def __UpperCamelCase ( snake_case__ , snake_case__ ):
A_ : List[str] = read_file_binary(snake_case__ )
A_ : str = compress_data(snake_case__ )
A_ : int = add_file_length(snake_case__ , snake_case__ )
write_file_binary(snake_case__ , snake_case__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 180 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {
'''configuration_chinese_clip''': [
'''CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ChineseCLIPConfig''',
'''ChineseCLIPOnnxConfig''',
'''ChineseCLIPTextConfig''',
'''ChineseCLIPVisionConfig''',
],
'''processing_chinese_clip''': ['''ChineseCLIPProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['''ChineseCLIPFeatureExtractor''']
__a = ['''ChineseCLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'''CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ChineseCLIPModel''',
'''ChineseCLIPPreTrainedModel''',
'''ChineseCLIPTextModel''',
'''ChineseCLIPVisionModel''',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 710 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class __a( unittest.TestCase ):
"""simple docstring"""
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : Dict = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
UpperCAmelCase_ : int = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase_ : List[Any] = {
'''unk_token''': '''<unk>''',
'''bos_token''': '''<s>''',
'''eos_token''': '''</s>''',
}
UpperCAmelCase_ : Dict = {
'''feature_size''': 1,
'''padding_value''': 0.0,
'''sampling_rate''': 16_000,
'''return_attention_mask''': False,
'''do_normalize''': True,
}
UpperCAmelCase_ : str = tempfile.mkdtemp()
UpperCAmelCase_ : Optional[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname ,_SCREAMING_SNAKE_CASE )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.feature_extraction_file ,'''w''' ,encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '''\n''' )
# load decoder from hub
UpperCAmelCase_ : str = '''hf-internal-testing/ngram-beam-search-decoder'''
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ : int = self.add_kwargs_tokens_map.copy()
kwargs.update(_SCREAMING_SNAKE_CASE )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Any:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Dict:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> int:
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_feature_extractor()
UpperCAmelCase_ : Tuple = self.get_decoder()
UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_SCREAMING_SNAKE_CASE )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() ,feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor ,_SCREAMING_SNAKE_CASE )
# decoder
self.assertEqual(processor.decoder._alphabet.labels ,decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set ,decoder.model_container[decoder._model_key]._unigram_set ,)
self.assertIsInstance(processor.decoder ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
UpperCAmelCase_ : Tuple = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
UpperCAmelCase_ : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname ,alpha=5.0 ,beta=3.0 ,score_boundary=-7.0 ,unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha ,5.0 )
self.assertEqual(processor.language_model.beta ,3.0 )
self.assertEqual(processor.language_model.score_boundary ,-7.0 )
self.assertEqual(processor.language_model.unk_score_offset ,3 )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : str = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_SCREAMING_SNAKE_CASE ,'''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=self.get_feature_extractor() ,decoder=self.get_decoder() )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Any = self.get_feature_extractor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : List[str] = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = floats_list((3, 1_000) )
UpperCAmelCase_ : Any = feature_extractor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : Any = processor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = self.get_decoder()
UpperCAmelCase_ : str = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = '''This is a test string'''
UpperCAmelCase_ : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = tokenizer(_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def a__ ( self ,_SCREAMING_SNAKE_CASE=(2, 10, 16) ,_SCREAMING_SNAKE_CASE=77 ) -> int:
np.random.seed(_SCREAMING_SNAKE_CASE )
return np.random.rand(*_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Dict:
UpperCAmelCase_ : int = self.get_feature_extractor()
UpperCAmelCase_ : Any = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = self.get_decoder()
UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self._get_dummy_logits(shape=(10, 16) ,seed=13 )
UpperCAmelCase_ : Optional[Any] = processor.decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = decoder.decode_beams(_SCREAMING_SNAKE_CASE )[0]
self.assertEqual(decoded_decoder[0] ,decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' ,decoded_processor.text )
self.assertEqual(decoded_decoder[-2] ,decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] ,decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_feature_extractor()
UpperCAmelCase_ : Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_decoder()
UpperCAmelCase_ : List[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
UpperCAmelCase_ : Dict = processor.batch_decode(_SCREAMING_SNAKE_CASE )
else:
with get_context(_SCREAMING_SNAKE_CASE ).Pool() as pool:
UpperCAmelCase_ : str = processor.batch_decode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = list(_SCREAMING_SNAKE_CASE )
with get_context('''fork''' ).Pool() as p:
UpperCAmelCase_ : List[str] = decoder.decode_beams_batch(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : str = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.text )
self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] ,decoded_processor.text )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.logit_score )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,decoded_processor.lm_score )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : List[Any] = self.get_feature_extractor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = self.get_decoder()
UpperCAmelCase_ : Any = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase_ : List[Any] = 15
UpperCAmelCase_ : Optional[Any] = -20.0
UpperCAmelCase_ : Tuple = -4.0
UpperCAmelCase_ : Union[str, Any] = processor.batch_decode(
_SCREAMING_SNAKE_CASE ,beam_width=_SCREAMING_SNAKE_CASE ,beam_prune_logp=_SCREAMING_SNAKE_CASE ,token_min_logp=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[Any] = decoded_processor_out.text
UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE )
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase_ : List[str] = decoder.decode_beams_batch(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,beam_width=_SCREAMING_SNAKE_CASE ,beam_prune_logp=_SCREAMING_SNAKE_CASE ,token_min_logp=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : str = [d[0][0] for d in decoded_decoder_out]
UpperCAmelCase_ : Union[str, Any] = [d[0][2] for d in decoded_decoder_out]
UpperCAmelCase_ : Dict = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] ,_SCREAMING_SNAKE_CASE )
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.0_54, -18.4_47] ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
self.assertTrue(np.array_equal(_SCREAMING_SNAKE_CASE ,decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.5_54, -13.94_74] ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = self.get_feature_extractor()
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = self._get_dummy_logits()
UpperCAmelCase_ : List[Any] = 2.0
UpperCAmelCase_ : Optional[int] = 5.0
UpperCAmelCase_ : List[Any] = -20.0
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : str = processor.batch_decode(
_SCREAMING_SNAKE_CASE ,alpha=_SCREAMING_SNAKE_CASE ,beta=_SCREAMING_SNAKE_CASE ,unk_score_offset=_SCREAMING_SNAKE_CASE ,lm_score_boundary=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Tuple = decoded_processor_out.text
UpperCAmelCase_ : Optional[Any] = list(_SCREAMING_SNAKE_CASE )
decoder.reset_params(
alpha=_SCREAMING_SNAKE_CASE ,beta=_SCREAMING_SNAKE_CASE ,unk_score_offset=_SCREAMING_SNAKE_CASE ,lm_score_boundary=_SCREAMING_SNAKE_CASE ,)
with get_context('''fork''' ).Pool() as pool:
UpperCAmelCase_ : Optional[int] = decoder.decode_beams_batch(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : List[str] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha ,2.0 )
self.assertEqual(lm_model.beta ,5.0 )
self.assertEqual(lm_model.unk_score_offset ,-20.0 )
self.assertEqual(lm_model.score_boundary ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Tuple = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : Dict = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase_ : Any = os.listdir(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[Any]:
UpperCAmelCase_ : int = snapshot_download('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
UpperCAmelCase_ : List[str] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
UpperCAmelCase_ : List[Any] = os.listdir(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = os.listdir(_SCREAMING_SNAKE_CASE )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Optional[Any]:
UpperCAmelCase_ : str = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Tuple = floats_list((3, 1_000) )
UpperCAmelCase_ : Optional[Any] = processor_wavaveca(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
UpperCAmelCase_ : List[str] = processor_auto(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() ,input_auto[key].sum() ,delta=1e-2 )
UpperCAmelCase_ : Any = self._get_dummy_logits()
UpperCAmelCase_ : int = processor_wavaveca.batch_decode(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = processor_auto.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(decoded_wavaveca.text ,decoded_auto.text )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = self.get_feature_extractor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Any = self.get_decoder()
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_SCREAMING_SNAKE_CASE ,feature_extractor=_SCREAMING_SNAKE_CASE ,decoder=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
processor.model_input_names ,feature_extractor.model_input_names ,msg='''`processor` and `feature_extractor` model input names do not match''' ,)
@staticmethod
def a__ ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ : int = [d[key] for d in offsets]
return retrieved_list
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : Union[str, Any] = self._get_dummy_logits()[0]
UpperCAmelCase_ : Tuple = processor.decode(_SCREAMING_SNAKE_CASE ,output_word_offsets=_SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ) ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] ,'''end_offset''' ) ,[1, 3, 5] )
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
UpperCAmelCase_ : int = self._get_dummy_logits()
UpperCAmelCase_ : List[str] = processor.batch_decode(_SCREAMING_SNAKE_CASE ,output_word_offsets=_SCREAMING_SNAKE_CASE )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) ,4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) for o in outputs['''word_offsets''']] ,outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''word''' ) ,['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''start_offset''' ) ,[0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] ,'''end_offset''' ) ,[1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a__ ( self ) -> Union[str, Any]:
import torch
UpperCAmelCase_ : List[str] = load_dataset('''common_voice''' ,'''en''' ,split='''train''' ,streaming=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = ds.cast_column('''audio''' ,datasets.Audio(sampling_rate=16_000 ) )
UpperCAmelCase_ : Tuple = iter(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = next(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
UpperCAmelCase_ : Dict = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
UpperCAmelCase_ : List[str] = processor(sample['''audio''']['''array'''] ,return_tensors='''pt''' ).input_values
with torch.no_grad():
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ).logits.cpu().numpy()
UpperCAmelCase_ : str = processor.decode(logits[0] ,output_word_offsets=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
UpperCAmelCase_ : Any = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
UpperCAmelCase_ : Any = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) ,_SCREAMING_SNAKE_CASE )
self.assertEqual(''' '''.join(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''word''' ) ) ,output.text )
# output times
UpperCAmelCase_ : List[Any] = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''start_time''' ) )
UpperCAmelCase_ : str = torch.tensor(self.get_from_offsets(_SCREAMING_SNAKE_CASE ,'''end_time''' ) )
# fmt: off
UpperCAmelCase_ : str = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
UpperCAmelCase_ : Optional[int] = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=0.01 ) )
self.assertTrue(torch.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=0.01 ) ) | 300 | 0 |
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 38 |
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[Any] , UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = size
snake_case__ = [0] * size
snake_case__ = [0] * size
@staticmethod
def __magic_name__ ( UpperCamelCase__ : int):
'''simple docstring'''
return index | (index + 1)
@staticmethod
def __magic_name__ ( UpperCamelCase__ : int):
'''simple docstring'''
return (index & (index + 1)) - 1
def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int):
'''simple docstring'''
snake_case__ = value
while index < self.size:
snake_case__ = self.get_prev(UpperCamelCase__) + 1
if current_left_border == index:
snake_case__ = value
else:
snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
snake_case__ = self.get_next(UpperCamelCase__)
def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int):
'''simple docstring'''
right -= 1 # Because of right is exclusive
snake_case__ = 0
while left <= right:
snake_case__ = self.get_prev(UpperCamelCase__)
if left <= current_left:
snake_case__ = max(UpperCamelCase__ , self.tree[right])
snake_case__ = current_left
else:
snake_case__ = max(UpperCamelCase__ , self.arr[right])
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 654 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "rwkv"
a__ = {"max_position_embeddings": "context_length"}
def __init__( self , __snake_case=5_02_77 , __snake_case=10_24 , __snake_case=40_96 , __snake_case=32 , __snake_case=None , __snake_case=None , __snake_case=1e-5 , __snake_case=0 , __snake_case=0 , __snake_case=6 , __snake_case=False , __snake_case=True , **__snake_case , ):
_UpperCamelCase : str = vocab_size
_UpperCamelCase : int = context_length
_UpperCamelCase : Tuple = hidden_size
_UpperCamelCase : Tuple = num_hidden_layers
_UpperCamelCase : Dict = attention_hidden_size if attention_hidden_size is not None else hidden_size
_UpperCamelCase : Tuple = intermediate_size if intermediate_size is not None else 4 * hidden_size
_UpperCamelCase : Union[str, Any] = layer_norm_epsilon
_UpperCamelCase : Dict = rescale_every
_UpperCamelCase : Optional[Any] = use_cache
_UpperCamelCase : str = bos_token_id
_UpperCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case)
| 719 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class lowercase ( _lowercase ):
"""simple docstring"""
a__ = "facebook/bart-large-mnli"
a__ = (
"This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
"should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
"It returns the most likely label in the list of provided `labels` for the input text."
)
a__ = "text_classifier"
a__ = AutoTokenizer
a__ = AutoModelForSequenceClassification
a__ = ["text", ["text"]]
a__ = ["text"]
def A__ ( self):
super().setup()
_UpperCamelCase : List[Any] = self.model.config
_UpperCamelCase : Optional[int] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail'):
_UpperCamelCase : Tuple = int(__snake_case)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def A__ ( self , __snake_case , __snake_case):
_UpperCamelCase : List[Any] = labels
return self.pre_processor(
[text] * len(__snake_case) , [f'''This example is {label}''' for label in labels] , return_tensors='pt' , padding='max_length' , )
def A__ ( self , __snake_case):
_UpperCamelCase : str = outputs.logits
_UpperCamelCase : Optional[Any] = torch.argmax(logits[:, 2]).item()
return self._labels[label_id]
| 648 | 0 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a : Tuple = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a : Optional[int] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a : Tuple = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a : Optional[int] = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a : Optional[int] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
def A ( self : Dict ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" ) ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , )
def A ( self : str , a_ : List[Any] , a_ : Optional[int] , a_ : Any=[1, 10, 100] , a_ : Optional[int]=4 , a_ : Optional[int]=3.0 ):
"""simple docstring"""
if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("This metric is currently not supported on Windows." )
with ThreadPoolExecutor(max_workers=a_ ) as executor:
__snake_case = []
__snake_case = Counter()
__snake_case = 0
__snake_case = defaultdict(a_ )
for task_id, (candidates, test_case) in enumerate(zip(a_ , a_ ) ):
for candidate in candidates:
__snake_case = candidate + "\n" + test_case
__snake_case = (test_program, timeout, task_id, completion_id[task_id])
__snake_case = executor.submit(a_ , *a_ )
futures.append(a_ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(a_ ):
__snake_case = future.result()
results[result["task_id"]].append((result["completion_id"], result) )
__snake_case , __snake_case = [], []
for result in results.values():
result.sort()
__snake_case = [r[1]["passed"] for r in result]
total.append(len(a_ ) )
correct.append(sum(a_ ) )
__snake_case = np.array(a_ )
__snake_case = np.array(a_ )
__snake_case = k
__snake_case = {f'''pass@{k}''': estimate_pass_at_k(a_ , a_ , a_ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def __UpperCAmelCase ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any ) -> List[Any]:
def estimator(_UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case = itertools.repeat(_UpperCAmelCase , len(_UpperCAmelCase ) )
else:
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
__snake_case = iter(_UpperCAmelCase )
return np.array([estimator(int(_UpperCAmelCase ) , int(_UpperCAmelCase ) , _UpperCAmelCase ) for n, c in zip(_UpperCAmelCase , _UpperCAmelCase )] )
| 69 |
class __magic_name__ :
'''simple docstring'''
def __init__( self: Optional[int] ):
SCREAMING_SNAKE_CASE_ = {}
def _A ( self: Optional[Any] ):
print(self.vertex )
for i in self.vertex:
print(_lowerCamelCase , ''' -> ''' , ''' -> '''.join([str(_lowerCamelCase ) for j in self.vertex[i]] ) )
def _A ( self: Optional[int] , _lowerCamelCase: int , _lowerCamelCase: int ):
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_lowerCamelCase )
else:
# else make a new vertex
SCREAMING_SNAKE_CASE_ = [to_vertex]
def _A ( self: int ):
# visited array for storing already visited nodes
SCREAMING_SNAKE_CASE_ = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_lowerCamelCase , _lowerCamelCase )
def _A ( self: Dict , _lowerCamelCase: int , _lowerCamelCase: list ):
# mark start vertex as visited
SCREAMING_SNAKE_CASE_ = True
print(_lowerCamelCase , end=''' ''' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 234 | 0 |
import cva
import numpy as np
class _UpperCamelCase :
def __init__( self :Union[str, Any] , lowerCamelCase :float , lowerCamelCase :int ) -> str:
if k in (0.04, 0.06):
UpperCAmelCase__ = k
UpperCAmelCase__ = window_size
else:
raise ValueError("invalid k value" )
def __str__( self :str ) -> str:
return str(self.k )
def UpperCAmelCase_ ( self :str , lowerCamelCase :str ) -> tuple[cva.Mat, list[list[int]]]:
UpperCAmelCase__ = cva.imread(lowerCamelCase , 0 )
UpperCAmelCase__ , UpperCAmelCase__ = img.shape
UpperCAmelCase__ = []
UpperCAmelCase__ = img.copy()
UpperCAmelCase__ = cva.cvtColor(lowerCamelCase , cva.COLOR_GRAY2RGB )
UpperCAmelCase__ , UpperCAmelCase__ = np.gradient(lowerCamelCase )
UpperCAmelCase__ = dx**2
UpperCAmelCase__ = dy**2
UpperCAmelCase__ = dx * dy
UpperCAmelCase__ = 0.04
UpperCAmelCase__ = self.window_size // 2
for y in range(lowerCamelCase , h - offset ):
for x in range(lowerCamelCase , w - offset ):
UpperCAmelCase__ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase__ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase__ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
UpperCAmelCase__ = (wxx * wyy) - (wxy**2)
UpperCAmelCase__ = wxx + wyy
UpperCAmelCase__ = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = HarrisCorner(0.0_4, 3)
_lowerCAmelCase, _lowerCAmelCase : Any = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 364 |
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase : int = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def lowerCAmelCase ( _lowerCAmelCase : np.ndarray , _lowerCAmelCase : float , _lowerCAmelCase : int = 1_6000 ):
"""simple docstring"""
UpperCAmelCase__ = int(round(sample_rate * max_length ) )
if len(_lowerCAmelCase ) <= sample_length:
return wav
UpperCAmelCase__ = randint(0 , len(_lowerCAmelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class _UpperCamelCase :
UpperCAmelCase_ = field(default=lowerCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
UpperCAmelCase_ = field(
default="""train""" , metadata={
"""help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
} , )
UpperCAmelCase_ = field(
default="""validation""" , metadata={
"""help""": (
"""The name of the training data set split to use (via the datasets library). Defaults to 'validation'"""
)
} , )
UpperCAmelCase_ = field(
default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
UpperCAmelCase_ = field(
default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. Defaults to 'label'"""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
UpperCAmelCase_ = field(
default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , )
@dataclass
class _UpperCamelCase :
UpperCAmelCase_ = field(
default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} )
UpperCAmelCase_ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} )
UpperCAmelCase_ = field(
default=lowerCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def UpperCAmelCase_ ( self :str ) -> List[Any]:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`." , lowerCamelCase , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def lowerCAmelCase ( ):
"""simple docstring"""
UpperCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _lowerCAmelCase , _lowerCAmelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(_lowerCAmelCase )
transformers.utils.logging.set_verbosity(_lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCAmelCase__ = DatasetDict()
UpperCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'''--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '''
"Make sure to set `--label_column_name` to the correct text column - one of "
F'''{", ".join(raw_datasets["train"].column_names )}.''' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCAmelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCAmelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCAmelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_lowerCAmelCase : Tuple ):
UpperCAmelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCAmelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_lowerCAmelCase )
UpperCAmelCase__ = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase__ = {model_input_name: inputs.get(_lowerCAmelCase )}
UpperCAmelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_lowerCAmelCase : Union[str, Any] ):
UpperCAmelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCAmelCase__ = feature_extractor(_lowerCAmelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCAmelCase__ = {model_input_name: inputs.get(_lowerCAmelCase )}
UpperCAmelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCAmelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCAmelCase__ , UpperCAmelCase__ = {}, {}
for i, label in enumerate(_lowerCAmelCase ):
UpperCAmelCase__ = str(_lowerCAmelCase )
UpperCAmelCase__ = label
# Load the accuracy metric from the datasets package
UpperCAmelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_lowerCAmelCase : Optional[Any] ):
UpperCAmelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_lowerCAmelCase , references=eval_pred.label_ids )
UpperCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_lowerCAmelCase ) , labelaid=_lowerCAmelCase , idalabel=_lowerCAmelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCAmelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCAmelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_lowerCAmelCase , output_all_columns=_lowerCAmelCase )
# Initialize our trainer
UpperCAmelCase__ = Trainer(
model=_lowerCAmelCase , args=_lowerCAmelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_lowerCAmelCase , tokenizer=_lowerCAmelCase , )
# Training
if training_args.do_train:
UpperCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase__ = last_checkpoint
UpperCAmelCase__ = trainer.train(resume_from_checkpoint=_lowerCAmelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCAmelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _lowerCAmelCase )
trainer.save_metrics("eval" , _lowerCAmelCase )
# Write model card and (optionally) push to hub
UpperCAmelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowerCAmelCase )
else:
trainer.create_model_card(**_lowerCAmelCase )
if __name__ == "__main__":
main()
| 364 | 1 |
from collections.abc import Callable
class lowerCAmelCase__ :
def __init__( self , a = None ) -> None:
'''simple docstring'''
_UpperCamelCase = []
# Stores indexes of each item for supporting updates and deletion.
_UpperCamelCase = {}
# Stores current size of heap.
_UpperCamelCase = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_UpperCamelCase = key or (lambda a : x)
def A_ ( self , a ) -> int | None:
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def A_ ( self , a ) -> int | None:
'''simple docstring'''
_UpperCamelCase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def A_ ( self , a ) -> int | None:
'''simple docstring'''
_UpperCamelCase = int(2 * i + 2 )
return right if 0 < right < self.size else None
def A_ ( self , a , a ) -> None:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_UpperCamelCase , _UpperCamelCase = self.arr[j], self.arr[i]
def A_ ( self , a , a ) -> bool:
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def A_ ( self , a ) -> int:
'''simple docstring'''
_UpperCamelCase = self._left(a )
_UpperCamelCase = self._right(a )
_UpperCamelCase = i
if left is not None and not self._cmp(a , a ):
_UpperCamelCase = left
if right is not None and not self._cmp(a , a ):
_UpperCamelCase = right
return valid_parent
def A_ ( self , a ) -> None:
'''simple docstring'''
_UpperCamelCase = self._parent(a )
while parent is not None and not self._cmp(a , a ):
self._swap(a , a )
_UpperCamelCase , _UpperCamelCase = parent, self._parent(a )
def A_ ( self , a ) -> None:
'''simple docstring'''
_UpperCamelCase = self._get_valid_parent(a )
while valid_parent != index:
self._swap(a , a )
_UpperCamelCase , _UpperCamelCase = valid_parent, self._get_valid_parent(a )
def A_ ( self , a , a ) -> None:
'''simple docstring'''
if item not in self.pos_map:
return
_UpperCamelCase = self.pos_map[item]
_UpperCamelCase = [item, self.key(a )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(a )
self._heapify_down(a )
def A_ ( self , a ) -> None:
'''simple docstring'''
if item not in self.pos_map:
return
_UpperCamelCase = self.pos_map[item]
del self.pos_map[item]
_UpperCamelCase = self.arr[self.size - 1]
_UpperCamelCase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(a )
self._heapify_down(a )
def A_ ( self , a , a ) -> None:
'''simple docstring'''
_UpperCamelCase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(a )] )
else:
_UpperCamelCase = [item, self.key(a )]
_UpperCamelCase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def A_ ( self ) -> tuple | None:
'''simple docstring'''
return self.arr[0] if self.size else None
def A_ ( self ) -> tuple | None:
'''simple docstring'''
_UpperCamelCase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def __A() -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 612 |
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("T")
lowerCamelCase__ = TypeVar("U")
class lowerCAmelCase__ ( Generic[T, U] ):
def __init__( self , a , a ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = key
_UpperCamelCase = val
_UpperCamelCase = None
_UpperCamelCase = None
def __repr__( self ) -> str:
'''simple docstring'''
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class lowerCAmelCase__ ( Generic[T, U] ):
def __init__( self ) -> None:
'''simple docstring'''
_UpperCamelCase = DoubleLinkedListNode(a , a )
_UpperCamelCase = DoubleLinkedListNode(a , a )
_UpperCamelCase , _UpperCamelCase = self.rear, self.head
def __repr__( self ) -> str:
'''simple docstring'''
_UpperCamelCase = ["""DoubleLinkedList"""]
_UpperCamelCase = self.head
while node.next is not None:
rep.append(str(a ) )
_UpperCamelCase = node.next
rep.append(str(self.rear ) )
return ",\n ".join(a )
def A_ ( self , a ) -> None:
'''simple docstring'''
_UpperCamelCase = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_UpperCamelCase = node
_UpperCamelCase = previous
_UpperCamelCase = node
_UpperCamelCase = self.rear
def A_ ( self , a ) -> DoubleLinkedListNode[T, U] | None:
'''simple docstring'''
if node.prev is None or node.next is None:
return None
_UpperCamelCase = node.next
_UpperCamelCase = node.prev
_UpperCamelCase = None
_UpperCamelCase = None
return node
class lowerCAmelCase__ ( Generic[T, U] ):
UpperCamelCase_ : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self , a ) -> int:
'''simple docstring'''
_UpperCamelCase = DoubleLinkedList()
_UpperCamelCase = capacity
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = {}
def __repr__( self ) -> str:
'''simple docstring'''
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self , a ) -> bool:
'''simple docstring'''
return key in self.cache
def A_ ( self , a ) -> U | None:
'''simple docstring'''
if key in self.cache:
self.hits += 1
_UpperCamelCase = self.cache[key]
_UpperCamelCase = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(a )
return node.val
self.miss += 1
return None
def A_ ( self , a , a ) -> None:
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_UpperCamelCase = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(a ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
_UpperCamelCase = DoubleLinkedListNode(a , a )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_UpperCamelCase = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_UpperCamelCase = value
self.list.add(a )
@classmethod
def A_ ( cls , a = 1_28 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
'''simple docstring'''
def cache_decorator_inner(a ) -> Callable[..., U]:
def cache_decorator_wrapper(*a ) -> U:
if func not in cls.decorator_function_to_instance_map:
_UpperCamelCase = LRUCache(a )
_UpperCamelCase = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_UpperCamelCase = func(*a )
cls.decorator_function_to_instance_map[func].put(args[0] , a )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(a , """cache_info""" , a ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 612 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _UpperCAmelCase ):
UpperCamelCase = (DDPMScheduler,)
def _lowercase ( self : List[str] , **__lowerCamelCase : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_start""": 0.0_001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**__UpperCamelCase )
return config
def _lowercase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def _lowercase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def _lowercase ( self : int ) -> Tuple:
"""simple docstring"""
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def _lowercase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__UpperCamelCase )
def _lowercase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def _lowercase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
self.check_over_configs(thresholding=__UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , )
def _lowercase ( self : int ) -> List[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def _lowercase ( self : int ) -> int:
"""simple docstring"""
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=__UpperCamelCase )
def _lowercase ( self : List[str] ) -> int:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def _lowercase ( self : List[Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**__UpperCamelCase )
UpperCAmelCase = len(__UpperCamelCase )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase = pred_prev_sample
UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_372 ) < 1e-3
def _lowercase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
UpperCAmelCase = scheduler_class(**__UpperCamelCase )
UpperCAmelCase = len(__UpperCamelCase )
UpperCAmelCase = self.dummy_model()
UpperCAmelCase = self.dummy_sample_deter
UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
UpperCAmelCase = pred_prev_sample
UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_631 ) < 1e-3
def _lowercase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**__UpperCamelCase )
UpperCAmelCase = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=__UpperCamelCase )
UpperCAmelCase = scheduler.timesteps
for i, timestep in enumerate(__UpperCamelCase ):
if i == len(__UpperCamelCase ) - 1:
UpperCAmelCase = -1
else:
UpperCAmelCase = timesteps[i + 1]
UpperCAmelCase = scheduler.previous_timestep(__UpperCamelCase )
UpperCAmelCase = prev_t.item()
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _lowercase ( self : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**__UpperCamelCase )
UpperCAmelCase = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(__UpperCamelCase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
def _lowercase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**__UpperCamelCase )
UpperCAmelCase = [1_0_0, 8_7, 5_0, 1, 0]
UpperCAmelCase = len(__UpperCamelCase )
with self.assertRaises(__UpperCamelCase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase )
def _lowercase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = self.scheduler_classes[0]
UpperCAmelCase = self.get_scheduler_config()
UpperCAmelCase = scheduler_class(**__UpperCamelCase )
UpperCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
__UpperCamelCase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
| 719 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class __lowercase ( __snake_case ):
UpperCamelCase = '''nllb-moe'''
UpperCamelCase = ['''past_key_values''']
UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[int] , __lowerCamelCase : Optional[Any]=1_2_8_1_1_2 , __lowerCamelCase : Dict=1_0_2_4 , __lowerCamelCase : Optional[int]=1_2 , __lowerCamelCase : Union[str, Any]=4_0_9_6 , __lowerCamelCase : List[str]=1_6 , __lowerCamelCase : List[str]=1_2 , __lowerCamelCase : int=4_0_9_6 , __lowerCamelCase : Tuple=1_6 , __lowerCamelCase : str=0.05 , __lowerCamelCase : List[str]=0.05 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Tuple=True , __lowerCamelCase : str="relu" , __lowerCamelCase : Dict=1_0_2_4 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Optional[int]=0.1 , __lowerCamelCase : List[Any]=0.0 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Dict=2 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Any=False , __lowerCamelCase : Tuple="float32" , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=1_2_8 , __lowerCamelCase : List[str]=6_4 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Tuple=4 , __lowerCamelCase : str=0.001 , __lowerCamelCase : Optional[int]=0.001 , __lowerCamelCase : Tuple="all" , __lowerCamelCase : Any=False , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : List[str]=1.0 , __lowerCamelCase : Dict=0.2 , __lowerCamelCase : Union[str, Any]=1 , __lowerCamelCase : int=0 , __lowerCamelCase : Dict=2 , __lowerCamelCase : int=False , **__lowerCamelCase : str , ) -> int:
"""simple docstring"""
UpperCAmelCase = vocab_size
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = d_model
UpperCAmelCase = encoder_ffn_dim
UpperCAmelCase = encoder_layers
UpperCAmelCase = encoder_attention_heads
UpperCAmelCase = decoder_ffn_dim
UpperCAmelCase = decoder_layers
UpperCAmelCase = decoder_attention_heads
UpperCAmelCase = dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = activation_function
UpperCAmelCase = init_std
UpperCAmelCase = encoder_layerdrop
UpperCAmelCase = decoder_layerdrop
UpperCAmelCase = use_cache
UpperCAmelCase = encoder_layers
UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCAmelCase = router_z_loss_coef
UpperCAmelCase = router_aux_loss_coef
UpperCAmelCase = decoder_sparse_step
UpperCAmelCase = encoder_sparse_step
UpperCAmelCase = num_experts
UpperCAmelCase = expert_capacity
UpperCAmelCase = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
UpperCAmelCase = router_dtype
UpperCAmelCase = router_ignore_padding_tokens
UpperCAmelCase = batch_prioritized_routing
UpperCAmelCase = second_expert_policy
UpperCAmelCase = normalize_router_prob_before_dropping
UpperCAmelCase = moe_eval_capacity_token_fraction
UpperCAmelCase = moe_token_dropout
UpperCAmelCase = output_router_logits
super().__init__(
pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , is_encoder_decoder=__lowerCamelCase , decoder_start_token_id=__lowerCamelCase , **__lowerCamelCase , )
| 627 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__A = 16
__A = 32
def __A ( _lowercase , _lowercase = 16 , _lowercase = "bert-base-cased" ):
'''simple docstring'''
_A = AutoTokenizer.from_pretrained(_lowercase )
_A = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(_lowercase ):
# max_length=None => use the model max length (it's actually the default)
_A = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=_lowercase , max_length=_lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_A = datasets.map(
_lowercase , batched=_lowercase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=_lowercase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_A = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(_lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_lowercase , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return tokenizer.pad(_lowercase , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
_A = DataLoader(
tokenized_datasets['''train'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
_A = DataLoader(
tokenized_datasets['''validation'''] , shuffle=_lowercase , collate_fn=_lowercase , batch_size=_lowercase )
return train_dataloader, eval_dataloader
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
_A = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_A = config['''lr''']
_A = int(config['''num_epochs'''] )
_A = int(config['''seed'''] )
_A = int(config['''batch_size'''] )
_A = args.model_name_or_path
set_seed(_lowercase )
_A ,_A = get_dataloaders(_lowercase , _lowercase , _lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_A = AutoModelForSequenceClassification.from_pretrained(_lowercase , return_dict=_lowercase )
# Instantiate optimizer
_A = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_A = optimizer_cls(params=model.parameters() , lr=_lowercase )
if accelerator.state.deepspeed_plugin is not None:
_A = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
_A = 1
_A = (len(_lowercase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_A = get_linear_schedule_with_warmup(
optimizer=_lowercase , num_warmup_steps=0 , num_training_steps=_lowercase , )
else:
_A = DummyScheduler(_lowercase , total_num_steps=_lowercase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_A ,_A ,_A ,_A ,_A = accelerator.prepare(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# We need to keep track of how many total steps we have iterated over
_A = 0
# We also need to keep track of the stating epoch so files are named properly
_A = 0
# Now we train the model
_A = evaluate.load('''glue''' , '''mrpc''' )
_A = 0
_A = {}
for epoch in range(_lowercase , _lowercase ):
model.train()
for step, batch in enumerate(_lowercase ):
_A = model(**_lowercase )
_A = outputs.loss
_A = loss / gradient_accumulation_steps
accelerator.backward(_lowercase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
_A = 0
for step, batch in enumerate(_lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_A = model(**_lowercase )
_A = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_A ,_A = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(_lowercase ) - 1:
_A = predictions[: len(eval_dataloader.dataset ) - samples_seen]
_A = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=_lowercase , references=_lowercase , )
_A = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , _lowercase )
_A = eval_metric['''accuracy''']
if best_performance < eval_metric["accuracy"]:
_A = eval_metric['''accuracy''']
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), f"""Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"""
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , '''all_results.json''' ) , '''w''' ) as f:
json.dump(_lowercase , _lowercase )
def __A ( ):
'''simple docstring'''
_A = argparse.ArgumentParser(description='''Simple example of training script tracking peak GPU memory usage.''' )
parser.add_argument(
'''--model_name_or_path''' , type=_lowercase , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=_lowercase , )
parser.add_argument(
'''--output_dir''' , type=_lowercase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--performance_lower_bound''' , type=_lowercase , default=_lowercase , help='''Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.''' , )
parser.add_argument(
'''--num_epochs''' , type=_lowercase , default=3 , help='''Number of train epochs.''' , )
_A = parser.parse_args()
_A = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(_lowercase , _lowercase )
if __name__ == "__main__":
main()
| 484 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self: List[str] , __A: List[str] , __A: List[str]=7 , __A: Tuple=3 , __A: Optional[int]=30 , __A: Optional[Any]=4_00 , __A: int=True , __A: str=None , __A: int=True , __A: Any=[0.5, 0.5, 0.5] , __A: Dict=[0.5, 0.5, 0.5] , __A: Dict=True , __A: str=1 / 2_55 , __A: Dict=True , ) -> Tuple:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
_A = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
_A = parent
_A = batch_size
_A = num_channels
_A = min_resolution
_A = max_resolution
_A = do_resize
_A = size
_A = do_normalize
_A = image_mean
_A = image_std
_A = do_rescale
_A = rescale_factor
_A = do_pad
def __A ( self: Optional[Any] ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def __A ( self: Any , __A: Optional[Any] , __A: int=False ) -> List[str]:
if not batched:
_A = image_inputs[0]
if isinstance(__A , Image.Image ):
_A ,_A = image.size
else:
_A ,_A = image.shape[1], image.shape[2]
if w < h:
_A = int(self.size['''shortest_edge'''] * h / w )
_A = self.size['''shortest_edge''']
elif w > h:
_A = self.size['''shortest_edge''']
_A = int(self.size['''shortest_edge'''] * w / h )
else:
_A = self.size['''shortest_edge''']
_A = self.size['''shortest_edge''']
else:
_A = []
for image in image_inputs:
_A ,_A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
_A = max(__A , key=lambda __A : item[0] )[0]
_A = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = DeformableDetrImageProcessor if is_vision_available() else None
def __A ( self: List[str] ) -> List[str]:
_A = DeformableDetrImageProcessingTester(self )
@property
def __A ( self: Tuple ) -> List[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self: Any ) -> List[str]:
_A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , '''image_mean''' ) )
self.assertTrue(hasattr(__A , '''image_std''' ) )
self.assertTrue(hasattr(__A , '''do_normalize''' ) )
self.assertTrue(hasattr(__A , '''do_resize''' ) )
self.assertTrue(hasattr(__A , '''do_rescale''' ) )
self.assertTrue(hasattr(__A , '''do_pad''' ) )
self.assertTrue(hasattr(__A , '''size''' ) )
def __A ( self: Tuple ) -> Optional[Any]:
_A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 13_33} )
self.assertEqual(image_processor.do_pad , __A )
_A = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
self.assertEqual(image_processor.do_pad , __A )
def __A ( self: Dict ) -> Any:
pass
def __A ( self: str ) -> List[str]:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A ,_A = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A ,_A = self.image_processor_tester.get_expected_values(__A , batched=__A )
_A = image_processing(__A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self: str ) -> Tuple:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A ,_A = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__A , return_tensors='''pt''' ).pixel_values
_A ,_A = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __A ( self: int ) -> Any:
# Initialize image_processing
_A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
_A = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
_A ,_A = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A = image_processing(__A , return_tensors='''pt''' ).pixel_values
_A ,_A = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __A ( self: Optional[Any] ) -> Tuple:
# prepare image and target
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
_A = DeformableDetrImageProcessor()
_A = image_processing(images=__A , annotations=__A , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __A )
_A = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
_A = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A )
_A = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1e-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) )
# verify class_labels
_A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) )
@slow
def __A ( self: Dict ) -> Optional[int]:
# prepare image, target and masks_path
_A = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
_A = json.loads(f.read() )
_A = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
_A = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
_A = DeformableDetrImageProcessor(format='''coco_panoptic''' )
_A = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors='''pt''' )
# verify pixel values
_A = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , __A )
_A = torch.tensor([0.2_796, 0.3_138, 0.3_481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
_A = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __A ) )
# verify boxes
_A = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __A )
_A = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __A , atol=1e-3 ) )
# verify image_id
_A = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __A ) )
# verify is_crowd
_A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __A ) )
# verify class_labels
_A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __A ) )
# verify masks
_A = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __A )
# verify orig_size
_A = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __A ) )
# verify size
_A = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __A ) )
| 484 | 1 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _UpperCAmelCase ( UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Any=1_024 ):
"""simple docstring"""
__lowerCamelCase : str = [], []
__lowerCamelCase : Any = list(zip(UpperCAmelCase , UpperCAmelCase ) )
__lowerCamelCase : List[str] = sorted_examples[0]
def is_too_big(UpperCAmelCase : Optional[Any] ):
return tok(UpperCAmelCase , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
__lowerCamelCase : Union[str, Any] = new_src + """ """ + src
__lowerCamelCase : str = new_tgt + """ """ + tgt
if is_too_big(UpperCAmelCase ) or is_too_big(UpperCAmelCase ): # cant fit, finalize example
finished_src.append(UpperCAmelCase )
finished_tgt.append(UpperCAmelCase )
__lowerCamelCase : str = src, tgt
else: # can fit, keep adding
__lowerCamelCase : int = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(UpperCAmelCase )
finished_tgt.append(UpperCAmelCase )
return finished_src, finished_tgt
def _UpperCAmelCase ( UpperCAmelCase : Tuple , UpperCAmelCase : Path , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase : List[Any] = Path(UpperCAmelCase )
save_path.mkdir(exist_ok=UpperCAmelCase )
for split in ["train"]:
__lowerCamelCase : List[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
__lowerCamelCase : Tuple = [x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()]
__lowerCamelCase : Tuple = [x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()]
__lowerCamelCase : int = pack_examples(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
print(f"""packed {split} split from {len(UpperCAmelCase )} examples -> {len(UpperCAmelCase )}.""" )
Path(save_path / f"""{split}.source""" ).open("""w""" ).write("""\n""".join(UpperCAmelCase ) )
Path(save_path / f"""{split}.target""" ).open("""w""" ).write("""\n""".join(UpperCAmelCase ) )
for split in ["val", "test"]:
__lowerCamelCase : Optional[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
shutil.copyfile(UpperCAmelCase , save_path / f"""{split}.source""" )
shutil.copyfile(UpperCAmelCase , save_path / f"""{split}.target""" )
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" , type=UpperCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""--max_seq_len""" , type=UpperCAmelCase , default=128 )
parser.add_argument("""--data_dir""" , type=UpperCAmelCase )
parser.add_argument("""--save_path""" , type=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = parser.parse_args()
__lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(UpperCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 709 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _UpperCAmelCase ( UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Any=1_024 ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase : str = [], []
__lowerCamelCase : Any = list(zip(UpperCAmelCase , UpperCAmelCase ) )
__lowerCamelCase , __lowerCamelCase : List[str] = sorted_examples[0]
def is_too_big(UpperCAmelCase : Optional[Any] ):
return tok(UpperCAmelCase , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
__lowerCamelCase : Union[str, Any] = new_src + """ """ + src
__lowerCamelCase : str = new_tgt + """ """ + tgt
if is_too_big(UpperCAmelCase ) or is_too_big(UpperCAmelCase ): # cant fit, finalize example
finished_src.append(UpperCAmelCase )
finished_tgt.append(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : str = src, tgt
else: # can fit, keep adding
__lowerCamelCase , __lowerCamelCase : int = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(UpperCAmelCase )
finished_tgt.append(UpperCAmelCase )
return finished_src, finished_tgt
def _UpperCAmelCase ( UpperCAmelCase : Tuple , UpperCAmelCase : Path , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase : List[Any] = Path(UpperCAmelCase )
save_path.mkdir(exist_ok=UpperCAmelCase )
for split in ["train"]:
__lowerCamelCase , __lowerCamelCase : List[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
__lowerCamelCase : Tuple = [x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()]
__lowerCamelCase : Tuple = [x.rstrip() for x in Path(UpperCAmelCase ).open().readlines()]
__lowerCamelCase , __lowerCamelCase : int = pack_examples(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
print(f"""packed {split} split from {len(UpperCAmelCase )} examples -> {len(UpperCAmelCase )}.""" )
Path(save_path / f"""{split}.source""" ).open("""w""" ).write("""\n""".join(UpperCAmelCase ) )
Path(save_path / f"""{split}.target""" ).open("""w""" ).write("""\n""".join(UpperCAmelCase ) )
for split in ["val", "test"]:
__lowerCamelCase , __lowerCamelCase : Optional[Any] = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
shutil.copyfile(UpperCAmelCase , save_path / f"""{split}.source""" )
shutil.copyfile(UpperCAmelCase , save_path / f"""{split}.target""" )
def _UpperCAmelCase ( ):
"""simple docstring"""
__lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" , type=UpperCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""--max_seq_len""" , type=UpperCAmelCase , default=128 )
parser.add_argument("""--data_dir""" , type=UpperCAmelCase )
parser.add_argument("""--save_path""" , type=UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = parser.parse_args()
__lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(UpperCAmelCase , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 458 | 0 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCamelCase : Tuple = {"""vocab_file""": """vocab.txt"""}
UpperCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
UpperCamelCase : int = {
"""openbmb/cpm-ant-10b""": 1024,
}
def UpperCamelCase_ ( __a ) -> Tuple:
a__ : List[str] = collections.OrderedDict()
with open(__a , "r" , encoding="utf-8" ) as reader:
a__ : Optional[int] = reader.readlines()
for index, token in enumerate(__a ):
a__ : List[str] = token.rstrip("\n" )
a__ : Tuple = index
return vocab
class A__ ( A__ ):
"""simple docstring"""
def __init__( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any]="<unk>" , lowerCamelCase__ : Any=200 ):
a__ : Tuple = vocab
a__ : Tuple = unk_token
a__ : List[Any] = max_input_chars_per_word
def _UpperCamelCase( self : int , lowerCamelCase__ : List[str] ):
a__ : List[str] = list(lowerCamelCase__ )
if len(lowerCamelCase__ ) > self.max_input_chars_per_word:
return [self.unk_token]
a__ : Optional[int] = 0
a__ : Tuple = []
while start < len(lowerCamelCase__ ):
a__ : Union[str, Any] = len(lowerCamelCase__ )
a__ : List[str] = None
while start < end:
a__ : int = "".join(chars[start:end] )
if substr in self.vocab:
a__ : Optional[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCamelCase__ )
a__ : List[Any] = end
return sub_tokens
class A__ ( A__ ):
"""simple docstring"""
_lowercase = VOCAB_FILES_NAMES
_lowercase = PRETRAINED_VOCAB_FILES_MAP
_lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase = ['input_ids', 'attention_mask']
_lowercase = False
def __init__( self : List[str] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int]="<d>" , lowerCamelCase__ : List[Any]="</d>" , lowerCamelCase__ : Union[str, Any]="<s>" , lowerCamelCase__ : Union[str, Any]="</s>" , lowerCamelCase__ : List[Any]="<pad>" , lowerCamelCase__ : str="<unk>" , lowerCamelCase__ : int="</n>" , lowerCamelCase__ : Optional[Any]="</_>" , lowerCamelCase__ : Any="left" , **lowerCamelCase__ : Optional[Any] , ):
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=lowerCamelCase__ , eod_token=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , line_token=lowerCamelCase__ , space_token=lowerCamelCase__ , padding_side=lowerCamelCase__ , **lowerCamelCase__ , )
a__ : Union[str, Any] = bod_token
a__ : Optional[Any] = eod_token
a__ : Any = load_vocab(lowerCamelCase__ )
a__ : Dict = self.encoder[space_token]
a__ : List[str] = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
a__ : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) )
a__ : Union[str, Any] = {v: k for k, v in self.encoder.items()}
a__ : Optional[int] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def _UpperCamelCase( self : List[str] ):
return self.encoder[self.bod_token]
@property
def _UpperCamelCase( self : Union[str, Any] ):
return self.encoder[self.eod_token]
@property
def _UpperCamelCase( self : Tuple ):
return self.encoder["\n"]
@property
def _UpperCamelCase( self : Optional[int] ):
return len(self.encoder )
def _UpperCamelCase( self : Dict ):
return dict(self.encoder , **self.added_tokens_encoder )
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : Optional[Any] ):
a__ : Optional[Any] = []
for x in jieba.cut(lowerCamelCase__ , cut_all=lowerCamelCase__ ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCamelCase__ ) )
return output_tokens
def _UpperCamelCase( self : str , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Tuple ):
a__ : Tuple = [i for i in token_ids if i >= 0]
a__ : List[str] = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : Optional[int] ):
return token in self.encoder
def _UpperCamelCase( self : List[Any] , lowerCamelCase__ : List[str] ):
return "".join(lowerCamelCase__ )
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : str ):
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : Tuple ):
return self.decoder.get(lowerCamelCase__ , self.unk_token )
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ):
if os.path.isdir(lowerCamelCase__ ):
a__ : Optional[int] = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
a__ : Optional[int] = (filename_prefix + "-" if filename_prefix else "") + save_directory
a__ : Dict = 0
if " " in self.encoder:
a__ : int = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
a__ : int = self.encoder["\n"]
del self.encoder["\n"]
a__ : Optional[int] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCamelCase__ : x[1] ) )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
a__ : List[Any] = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def _UpperCamelCase( self : Optional[Any] , lowerCamelCase__ : List[int] , lowerCamelCase__ : List[int] = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def _UpperCamelCase( self : Optional[int] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ ))
return [1] + ([0] * len(lowerCamelCase__ ))
| 37 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__A : List[str] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowerCAmelCase__ :
"""simple docstring"""
__UpperCAmelCase : Dict = PegasusConfig
__UpperCAmelCase : int = {}
__UpperCAmelCase : Tuple = "gelu"
def __init__( self : List[str] , lowercase__ : int , lowercase__ : Union[str, Any]=1_3 , lowercase__ : Dict=7 , lowercase__ : Optional[Any]=True , lowercase__ : str=False , lowercase__ : Optional[int]=9_9 , lowercase__ : Tuple=3_2 , lowercase__ : Any=5 , lowercase__ : Any=4 , lowercase__ : Any=3_7 , lowercase__ : Any=0.1 , lowercase__ : List[str]=0.1 , lowercase__ : Tuple=2_0 , lowercase__ : str=2 , lowercase__ : int=1 , lowercase__ : Dict=0 , ):
__lowercase : int = parent
__lowercase : str = batch_size
__lowercase : Tuple = seq_length
__lowercase : Tuple = is_training
__lowercase : Dict = use_labels
__lowercase : List[str] = vocab_size
__lowercase : int = hidden_size
__lowercase : Tuple = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : int = intermediate_size
__lowercase : Any = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : List[Any] = max_position_embeddings
__lowercase : int = eos_token_id
__lowercase : Union[str, Any] = pad_token_id
__lowercase : Union[str, Any] = bos_token_id
def snake_case ( self : int ):
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__lowercase : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 )
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[int] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowercase : Optional[Any] = prepare_pegasus_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
return config, inputs_dict
def snake_case ( self : str , lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ):
__lowercase : Union[str, Any] = 2_0
__lowercase : List[Any] = model_class_name(lowercase__ )
__lowercase : Tuple = model.encode(inputs_dict["input_ids"] )
__lowercase ,__lowercase : Optional[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__lowercase : Any = model.init_cache(decoder_input_ids.shape[0] , lowercase__ , lowercase__ )
__lowercase : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
__lowercase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=lowercase__ , decoder_position_ids=lowercase__ , )
__lowercase : List[str] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__lowercase : Union[str, Any] = model.decode(
decoder_input_ids[:, -1:] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase__ , )
__lowercase : List[Any] = model.decode(lowercase__ , lowercase__ )
__lowercase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
def snake_case ( self : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : List[str] , lowercase__ : Optional[Any] ):
__lowercase : Any = 2_0
__lowercase : Any = model_class_name(lowercase__ )
__lowercase : List[Any] = model.encode(inputs_dict["input_ids"] )
__lowercase ,__lowercase : Optional[Any] = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__lowercase : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowercase : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , lowercase__ , lowercase__ )
__lowercase : List[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowercase : str = model.decode(
decoder_input_ids[:, :-1] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=lowercase__ , decoder_position_ids=lowercase__ , )
__lowercase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__lowercase : Dict = model.decode(
decoder_input_ids[:, -1:] , lowercase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase__ , decoder_position_ids=lowercase__ , )
__lowercase : Union[str, Any] = model.decode(lowercase__ , lowercase__ , decoder_attention_mask=lowercase__ )
__lowercase : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase=None, _lowerCamelCase=None, ) ->int:
"""simple docstring"""
if attention_mask is None:
__lowercase : List[str] = np.not_equal(_lowerCamelCase, config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__lowercase : Optional[int] = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape, dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id ).astype(np.inta ),
], axis=-1, )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class lowerCAmelCase__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : int = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__UpperCAmelCase : Optional[int] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__UpperCAmelCase : Dict = True
__UpperCAmelCase : int = False
__UpperCAmelCase : Optional[int] = False
__UpperCAmelCase : Optional[int] = False
def snake_case ( self : List[Any] ):
__lowercase : Optional[Any] = FlaxPegasusModelTester(self )
__lowercase : Optional[Any] = ConfigTester(self , config_class=lowercase__ )
def snake_case ( self : List[Any] ):
self.config_tester.run_common_tests()
def snake_case ( self : Optional[Any] ):
__lowercase ,__lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase__ , lowercase__ , lowercase__ )
def snake_case ( self : Optional[int] ):
__lowercase ,__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase__ , lowercase__ , lowercase__ )
def snake_case ( self : Tuple ):
__lowercase ,__lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase : Union[str, Any] = self._prepare_for_class(lowercase__ , lowercase__ )
__lowercase : List[str] = model_class(lowercase__ )
@jax.jit
def encode_jitted(lowercase__ : List[str] , lowercase__ : int=None , **lowercase__ : Tuple ):
return model.encode(input_ids=lowercase__ , attention_mask=lowercase__ )
with self.subTest("JIT Enabled" ):
__lowercase : List[Any] = encode_jitted(**lowercase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase : Optional[Any] = encode_jitted(**lowercase__ ).to_tuple()
self.assertEqual(len(lowercase__ ) , len(lowercase__ ) )
for jitted_output, output in zip(lowercase__ , lowercase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case ( self : Optional[Any] ):
__lowercase ,__lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase : Union[str, Any] = model_class(lowercase__ )
__lowercase : List[str] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
__lowercase : Optional[int] = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase__ : Optional[int] , lowercase__ : Optional[int] , lowercase__ : Any ):
return model.decode(
decoder_input_ids=lowercase__ , decoder_attention_mask=lowercase__ , encoder_outputs=lowercase__ , )
with self.subTest("JIT Enabled" ):
__lowercase : Tuple = decode_jitted(**lowercase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase : Any = decode_jitted(**lowercase__ ).to_tuple()
self.assertEqual(len(lowercase__ ) , len(lowercase__ ) )
for jitted_output, output in zip(lowercase__ , lowercase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case ( self : Any ):
for model_class_name in self.all_model_classes:
__lowercase : int = model_class_name.from_pretrained("google/pegasus-large" , from_pt=lowercase__ )
__lowercase : Any = np.ones((1, 1) )
__lowercase : Tuple = model(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
def snake_case ( self : Optional[int] ):
__lowercase : str = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
__lowercase : Optional[Any] = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
__lowercase : Any = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__lowercase : Union[str, Any] = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
__lowercase : Tuple = tokenizer(lowercase__ , return_tensors="np" , truncation=lowercase__ , max_length=5_1_2 , padding=lowercase__ )
__lowercase : Tuple = model.generate(**lowercase__ , num_beams=2 ).sequences
__lowercase : str = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__ )
assert tgt_text == decoded
| 575 | 0 |
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_ ) -> Any:
"""simple docstring"""
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
_UpperCamelCase : Tuple = np.full((len(lowerCAmelCase__ ), sequence_length, 2) ,lowerCAmelCase__ )
else:
_UpperCamelCase : str = np.full((len(lowerCAmelCase__ ), sequence_length) ,lowerCAmelCase__ )
for i, tensor in enumerate(lowerCAmelCase__ ):
if padding_side == "right":
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
_UpperCamelCase : Optional[int] = tensor[:sequence_length]
else:
_UpperCamelCase : Union[str, Any] = tensor[:sequence_length]
else:
if isinstance(lowerCAmelCase__ ,lowerCAmelCase__ ):
_UpperCamelCase : List[Any] = tensor[:sequence_length]
else:
_UpperCamelCase : Union[str, Any] = tensor[:sequence_length]
return out_tensor.tolist()
def lowercase__ ( lowercase_ ) -> str:
"""simple docstring"""
_UpperCamelCase : Dict = ord(lowerCAmelCase__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
_UpperCamelCase : Optional[Any] = unicodedata.category(lowerCAmelCase__ )
if cat.startswith("P" ):
return True
return False
@dataclass
class __SCREAMING_SNAKE_CASE ( __a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE__ :Union[bool, str, PaddingStrategy] = True
SCREAMING_SNAKE_CASE__ :Optional[int] = None
SCREAMING_SNAKE_CASE__ :Optional[int] = None
SCREAMING_SNAKE_CASE__ :int = -100
SCREAMING_SNAKE_CASE__ :str = "pt"
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : List[str] ) -> List[str]:
import torch
_UpperCamelCase : Tuple = "label" if "label" in features[0].keys() else "labels"
_UpperCamelCase : Optional[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
_UpperCamelCase : Dict = self.tokenizer.pad(
snake_case__ , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" if labels is None else None , )
if labels is None:
return batch
_UpperCamelCase : Any = torch.tensor(batch["entity_ids"] ).shape[1]
_UpperCamelCase : Union[str, Any] = self.tokenizer.padding_side
if padding_side == "right":
_UpperCamelCase : Dict = [
list(snake_case__ ) + [self.label_pad_token_id] * (sequence_length - len(snake_case__ )) for label in labels
]
else:
_UpperCamelCase : int = [
[self.label_pad_token_id] * (sequence_length - len(snake_case__ )) + list(snake_case__ ) for label in labels
]
_UpperCamelCase : Any = [feature["ner_tags"] for feature in features]
_UpperCamelCase : List[Any] = padding_tensor(snake_case__ , -1 , snake_case__ , snake_case__ )
_UpperCamelCase : str = [feature["original_entity_spans"] for feature in features]
_UpperCamelCase : List[str] = padding_tensor(snake_case__ , (-1, -1) , snake_case__ , snake_case__ )
_UpperCamelCase : Optional[int] = {k: torch.tensor(snake_case__ , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 705 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# TODO: Head-masking not yet implement
pass
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return tf.constant(lowercase_ ,dtype=tf.intaa )
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
| 51 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class _lowercase ( unittest.TestCase ):
def __init__( self , A__ , A__=7 , A__=3 , A__=18 , A__=30 , A__=4_00 , A__=True , A__=None , A__=True , ) -> Tuple:
snake_case = size if size is not None else {'''height''': 18, '''width''': 18}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = apply_ocr
def UpperCamelCase ( self ) -> int:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowercase ( snake_case_ , unittest.TestCase ):
_UpperCAmelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCamelCase ( self ) -> str:
snake_case = LayoutLMvaImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> List[Any]:
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , '''do_resize''' ) )
self.assertTrue(hasattr(A__ , '''size''' ) )
self.assertTrue(hasattr(A__ , '''apply_ocr''' ) )
def UpperCamelCase ( self ) -> str:
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def UpperCamelCase ( self ) -> Any:
pass
def UpperCamelCase ( self ) -> Optional[int]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , A__ )
self.assertIsInstance(encoding.boxes , A__ )
# Test batched
snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase ( self ) -> Union[str, Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def UpperCamelCase ( self ) -> Optional[Any]:
# with apply_OCR = True
snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
snake_case = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
snake_case = image_processing(A__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A__ )
self.assertListEqual(encoding.boxes , A__ )
# with apply_OCR = False
snake_case = LayoutLMvaImageProcessor(apply_ocr=A__ )
snake_case = image_processing(A__ , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 342 |
__a :Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
A_ = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
number //= 10_0000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
__a :list[bool | None] = [None] * 1000_0000
__a :Optional[Any] = True
__a :List[Any] = False
def __snake_case ( __UpperCamelCase : int ):
"""simple docstring"""
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
A_ = chain(next_number(__UpperCamelCase ) )
A_ = number_chain
while number < 1000_0000:
A_ = number_chain
number *= 10
return number_chain
def __snake_case ( __UpperCamelCase : int = 1000_0000 ):
"""simple docstring"""
for i in range(1 ,__UpperCamelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }") | 86 | 0 |
def snake_case (UpperCamelCase : List[Any] ):
'''simple docstring'''
lowerCamelCase__ = [0] * len(UpperCamelCase )
lowerCamelCase__ = []
lowerCamelCase__ = [1] * len(UpperCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(UpperCamelCase ) ):
if indegree[i] == 0:
queue.append(UpperCamelCase )
while queue:
lowerCamelCase__ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowerCamelCase__ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(UpperCamelCase )
print(max(UpperCamelCase ) )
# Adjacency list of Graph
a__ : int = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 235 |
def snake_case (UpperCamelCase : int = 50 ):
'''simple docstring'''
lowerCamelCase__ = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 235 | 1 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
SCREAMING_SNAKE_CASE__ = pytest.mark.integration
@pytest.mark.parametrize("path" ,["paws", "csv"] )
def lowerCamelCase ( _snake_case : Union[str, Any] ,_snake_case : str ):
'''simple docstring'''
inspect_dataset(_snake_case ,_snake_case )
lowercase__ = path + ".py"
assert script_name in os.listdir(_snake_case )
assert "__pycache__" not in os.listdir(_snake_case )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" ,["accuracy"] )
def lowerCamelCase ( _snake_case : List[Any] ,_snake_case : Dict ):
'''simple docstring'''
inspect_metric(_snake_case ,_snake_case )
lowercase__ = path + ".py"
assert script_name in os.listdir(_snake_case )
assert "__pycache__" not in os.listdir(_snake_case )
@pytest.mark.parametrize(
"path, config_name, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def lowerCamelCase ( _snake_case : List[str] ,_snake_case : Dict ,_snake_case : Union[str, Any] ):
'''simple docstring'''
lowercase__ = get_dataset_config_info(_snake_case ,config_name=_snake_case )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def lowerCamelCase ( _snake_case : Any ,_snake_case : List[str] ,_snake_case : Any ):
'''simple docstring'''
with pytest.raises(_snake_case ):
get_dataset_config_info(_snake_case ,config_name=_snake_case )
@pytest.mark.parametrize(
"path, expected" ,[
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] ,)
def lowerCamelCase ( _snake_case : int ,_snake_case : List[str] ):
'''simple docstring'''
lowercase__ = get_dataset_config_names(_snake_case )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" ,[
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] ,)
def lowerCamelCase ( _snake_case : Union[str, Any] ,_snake_case : Any ,_snake_case : int ):
'''simple docstring'''
lowercase__ = get_dataset_infos(_snake_case )
assert list(infos.keys() ) == expected_configs
lowercase__ = expected_configs[0]
assert expected_config in infos
lowercase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" ,[
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] ,)
def lowerCamelCase ( _snake_case : Optional[int] ,_snake_case : Any ,_snake_case : List[Any] ):
'''simple docstring'''
lowercase__ = get_dataset_infos(_snake_case )
assert expected_config in infos
lowercase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" ,[
("paws", None, ValueError),
] ,)
def lowerCamelCase ( _snake_case : str ,_snake_case : Tuple ,_snake_case : Optional[Any] ):
'''simple docstring'''
with pytest.raises(_snake_case ):
get_dataset_split_names(_snake_case ,config_name=_snake_case )
| 267 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class snake_case (UpperCamelCase , UpperCamelCase ):
@register_to_config
def __init__( self ,UpperCAmelCase_ = 768 ,) -> List[Any]:
super().__init__()
lowercase__ = nn.Parameter(torch.zeros(1 ,UpperCAmelCase_ ) )
lowercase__ = nn.Parameter(torch.ones(1 ,UpperCAmelCase_ ) )
def _a ( self ,UpperCAmelCase_ = None ,UpperCAmelCase_ = None ,) -> Any:
lowercase__ = nn.Parameter(self.mean.to(UpperCAmelCase_ ).to(UpperCAmelCase_ ) )
lowercase__ = nn.Parameter(self.std.to(UpperCAmelCase_ ).to(UpperCAmelCase_ ) )
return self
def _a ( self ,UpperCAmelCase_ ) -> Tuple:
lowercase__ = (embeds - self.mean) * 1.0 / self.std
return embeds
def _a ( self ,UpperCAmelCase_ ) -> List[str]:
lowercase__ = (embeds * self.std) + self.mean
return embeds
| 267 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
__lowercase = logging.get_logger('''transformers.models.encodec''')
__lowercase = {
'''quantizer.vq.layers.*._codebook.inited''': '''quantizer.layers.*.codebook.inited''',
'''quantizer.vq.layers.*._codebook.cluster_size''': '''quantizer.layers.*.codebook.cluster_size''',
'''quantizer.vq.layers.*._codebook.embed''': '''quantizer.layers.*.codebook.embed''',
'''quantizer.vq.layers.*._codebook.embed_avg''': '''quantizer.layers.*.codebook.embed_avg''',
}
__lowercase = {
'''encoder.model.0.conv.conv''': '''encoder.layers.0.conv''',
'''encoder.model.1.block.1.conv.conv''': '''encoder.layers.1.block.1.conv''',
'''encoder.model.1.block.3.conv.conv''': '''encoder.layers.1.block.3.conv''',
'''encoder.model.1.shortcut.conv.conv''': '''encoder.layers.1.shortcut.conv''',
'''encoder.model.3.conv.conv''': '''encoder.layers.3.conv''',
'''encoder.model.4.block.1.conv.conv''': '''encoder.layers.4.block.1.conv''',
'''encoder.model.4.block.3.conv.conv''': '''encoder.layers.4.block.3.conv''',
'''encoder.model.4.shortcut.conv.conv''': '''encoder.layers.4.shortcut.conv''',
'''encoder.model.6.conv.conv''': '''encoder.layers.6.conv''',
'''encoder.model.7.block.1.conv.conv''': '''encoder.layers.7.block.1.conv''',
'''encoder.model.7.block.3.conv.conv''': '''encoder.layers.7.block.3.conv''',
'''encoder.model.7.shortcut.conv.conv''': '''encoder.layers.7.shortcut.conv''',
'''encoder.model.9.conv.conv''': '''encoder.layers.9.conv''',
'''encoder.model.10.block.1.conv.conv''': '''encoder.layers.10.block.1.conv''',
'''encoder.model.10.block.3.conv.conv''': '''encoder.layers.10.block.3.conv''',
'''encoder.model.10.shortcut.conv.conv''': '''encoder.layers.10.shortcut.conv''',
'''encoder.model.12.conv.conv''': '''encoder.layers.12.conv''',
'''encoder.model.13.lstm''': '''encoder.layers.13.lstm''',
'''encoder.model.15.conv.conv''': '''encoder.layers.15.conv''',
}
__lowercase = {
'''encoder.model.0.conv.norm''': '''encoder.layers.0.norm''',
'''encoder.model.1.block.1.conv.norm''': '''encoder.layers.1.block.1.norm''',
'''encoder.model.1.block.3.conv.norm''': '''encoder.layers.1.block.3.norm''',
'''encoder.model.1.shortcut.conv.norm''': '''encoder.layers.1.shortcut.norm''',
'''encoder.model.3.conv.norm''': '''encoder.layers.3.norm''',
'''encoder.model.4.block.1.conv.norm''': '''encoder.layers.4.block.1.norm''',
'''encoder.model.4.block.3.conv.norm''': '''encoder.layers.4.block.3.norm''',
'''encoder.model.4.shortcut.conv.norm''': '''encoder.layers.4.shortcut.norm''',
'''encoder.model.6.conv.norm''': '''encoder.layers.6.norm''',
'''encoder.model.7.block.1.conv.norm''': '''encoder.layers.7.block.1.norm''',
'''encoder.model.7.block.3.conv.norm''': '''encoder.layers.7.block.3.norm''',
'''encoder.model.7.shortcut.conv.norm''': '''encoder.layers.7.shortcut.norm''',
'''encoder.model.9.conv.norm''': '''encoder.layers.9.norm''',
'''encoder.model.10.block.1.conv.norm''': '''encoder.layers.10.block.1.norm''',
'''encoder.model.10.block.3.conv.norm''': '''encoder.layers.10.block.3.norm''',
'''encoder.model.10.shortcut.conv.norm''': '''encoder.layers.10.shortcut.norm''',
'''encoder.model.12.conv.norm''': '''encoder.layers.12.norm''',
'''encoder.model.15.conv.norm''': '''encoder.layers.15.norm''',
}
__lowercase = {
'''decoder.model.0.conv.conv''': '''decoder.layers.0.conv''',
'''decoder.model.1.lstm''': '''decoder.layers.1.lstm''',
'''decoder.model.3.convtr.convtr''': '''decoder.layers.3.conv''',
'''decoder.model.4.block.1.conv.conv''': '''decoder.layers.4.block.1.conv''',
'''decoder.model.4.block.3.conv.conv''': '''decoder.layers.4.block.3.conv''',
'''decoder.model.4.shortcut.conv.conv''': '''decoder.layers.4.shortcut.conv''',
'''decoder.model.6.convtr.convtr''': '''decoder.layers.6.conv''',
'''decoder.model.7.block.1.conv.conv''': '''decoder.layers.7.block.1.conv''',
'''decoder.model.7.block.3.conv.conv''': '''decoder.layers.7.block.3.conv''',
'''decoder.model.7.shortcut.conv.conv''': '''decoder.layers.7.shortcut.conv''',
'''decoder.model.9.convtr.convtr''': '''decoder.layers.9.conv''',
'''decoder.model.10.block.1.conv.conv''': '''decoder.layers.10.block.1.conv''',
'''decoder.model.10.block.3.conv.conv''': '''decoder.layers.10.block.3.conv''',
'''decoder.model.10.shortcut.conv.conv''': '''decoder.layers.10.shortcut.conv''',
'''decoder.model.12.convtr.convtr''': '''decoder.layers.12.conv''',
'''decoder.model.13.block.1.conv.conv''': '''decoder.layers.13.block.1.conv''',
'''decoder.model.13.block.3.conv.conv''': '''decoder.layers.13.block.3.conv''',
'''decoder.model.13.shortcut.conv.conv''': '''decoder.layers.13.shortcut.conv''',
'''decoder.model.15.conv.conv''': '''decoder.layers.15.conv''',
}
__lowercase = {
'''decoder.model.0.conv.norm''': '''decoder.layers.0.norm''',
'''decoder.model.3.convtr.norm''': '''decoder.layers.3.norm''',
'''decoder.model.4.block.1.conv.norm''': '''decoder.layers.4.block.1.norm''',
'''decoder.model.4.block.3.conv.norm''': '''decoder.layers.4.block.3.norm''',
'''decoder.model.4.shortcut.conv.norm''': '''decoder.layers.4.shortcut.norm''',
'''decoder.model.6.convtr.norm''': '''decoder.layers.6.norm''',
'''decoder.model.7.block.1.conv.norm''': '''decoder.layers.7.block.1.norm''',
'''decoder.model.7.block.3.conv.norm''': '''decoder.layers.7.block.3.norm''',
'''decoder.model.7.shortcut.conv.norm''': '''decoder.layers.7.shortcut.norm''',
'''decoder.model.9.convtr.norm''': '''decoder.layers.9.norm''',
'''decoder.model.10.block.1.conv.norm''': '''decoder.layers.10.block.1.norm''',
'''decoder.model.10.block.3.conv.norm''': '''decoder.layers.10.block.3.norm''',
'''decoder.model.10.shortcut.conv.norm''': '''decoder.layers.10.shortcut.norm''',
'''decoder.model.12.convtr.norm''': '''decoder.layers.12.norm''',
'''decoder.model.13.block.1.conv.norm''': '''decoder.layers.13.block.1.norm''',
'''decoder.model.13.block.3.conv.norm''': '''decoder.layers.13.block.3.norm''',
'''decoder.model.13.shortcut.conv.norm''': '''decoder.layers.13.shortcut.norm''',
'''decoder.model.15.conv.norm''': '''decoder.layers.15.norm''',
}
__lowercase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
__lowercase = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
__lowercase = []
__lowercase = []
def lowerCAmelCase (__UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : int ):
"""simple docstring"""
for attribute in key.split('''.''' ):
__UpperCamelCase =getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
__UpperCamelCase =getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
__UpperCamelCase =hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__UpperCamelCase =value
elif weight_type == "weight_g":
__UpperCamelCase =value
elif weight_type == "weight_v":
__UpperCamelCase =value
elif weight_type == "bias":
__UpperCamelCase =value
elif weight_type == "running_mean":
__UpperCamelCase =value
elif weight_type == "running_var":
__UpperCamelCase =value
elif weight_type == "num_batches_tracked":
__UpperCamelCase =value
elif weight_type == "weight_ih_l0":
__UpperCamelCase =value
elif weight_type == "weight_hh_l0":
__UpperCamelCase =value
elif weight_type == "bias_ih_l0":
__UpperCamelCase =value
elif weight_type == "bias_hh_l0":
__UpperCamelCase =value
elif weight_type == "weight_ih_l1":
__UpperCamelCase =value
elif weight_type == "weight_hh_l1":
__UpperCamelCase =value
elif weight_type == "bias_ih_l1":
__UpperCamelCase =value
elif weight_type == "bias_hh_l1":
__UpperCamelCase =value
else:
__UpperCamelCase =value
logger.info(F"""{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.""" )
def lowerCAmelCase (__UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] ):
"""simple docstring"""
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__UpperCamelCase , __UpperCamelCase =key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowerCAmelCase (__UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
__UpperCamelCase =[]
if model_name == "encodec_24khz" or "encodec_32khz":
__UpperCamelCase =MAPPING_24K
elif model_name == "encodec_48khz":
__UpperCamelCase =MAPPING_48K
else:
raise ValueError(F"""Unsupported model: {model_name}""" )
for name, value in orig_dict.items():
if should_ignore(__UpperCamelCase , __UpperCamelCase ):
logger.info(F"""{name} was ignored""" )
continue
__UpperCamelCase =False
for key, mapped_key in MAPPING.items():
if "*" in key:
__UpperCamelCase , __UpperCamelCase =key.split('''.*.''' )
if prefix in name and suffix in name:
__UpperCamelCase =suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith('''embed''' ) and name.endswith('''embed_avg''' ):
continue
__UpperCamelCase =True
if "*" in mapped_key:
__UpperCamelCase =name.split(__UpperCamelCase )[0].split('''.''' )[-2]
__UpperCamelCase =mapped_key.replace('''*''' , __UpperCamelCase )
if "weight_g" in name:
__UpperCamelCase ='''weight_g'''
elif "weight_v" in name:
__UpperCamelCase ='''weight_v'''
elif "weight_ih_l0" in name:
__UpperCamelCase ='''weight_ih_l0'''
elif "weight_hh_l0" in name:
__UpperCamelCase ='''weight_hh_l0'''
elif "bias_ih_l0" in name:
__UpperCamelCase ='''bias_ih_l0'''
elif "bias_hh_l0" in name:
__UpperCamelCase ='''bias_hh_l0'''
elif "weight_ih_l1" in name:
__UpperCamelCase ='''weight_ih_l1'''
elif "weight_hh_l1" in name:
__UpperCamelCase ='''weight_hh_l1'''
elif "bias_ih_l1" in name:
__UpperCamelCase ='''bias_ih_l1'''
elif "bias_hh_l1" in name:
__UpperCamelCase ='''bias_hh_l1'''
elif "bias" in name:
__UpperCamelCase ='''bias'''
elif "weight" in name:
__UpperCamelCase ='''weight'''
elif "running_mean" in name:
__UpperCamelCase ='''running_mean'''
elif "running_var" in name:
__UpperCamelCase ='''running_var'''
elif "num_batches_tracked" in name:
__UpperCamelCase ='''num_batches_tracked'''
else:
__UpperCamelCase =None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(F"""Unused weights: {unused_weights}""" )
@torch.no_grad()
def lowerCAmelCase (__UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Dict=None , ):
"""simple docstring"""
if config_path is not None:
__UpperCamelCase =EncodecConfig.from_pretrained(__UpperCamelCase )
else:
__UpperCamelCase =EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
__UpperCamelCase =[8, 5, 4, 4]
__UpperCamelCase =[2.2]
__UpperCamelCase =6_4
__UpperCamelCase =3_2_0_0_0
__UpperCamelCase =2_0_4_8
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
elif model_name == "encodec_48khz":
__UpperCamelCase =[8, 5, 4, 2]
__UpperCamelCase =[3.0, 6.0, 1_2.0, 2_4.0]
__UpperCamelCase =4_8_0_0_0
__UpperCamelCase =2
__UpperCamelCase =False
__UpperCamelCase ='''time_group_norm'''
__UpperCamelCase =True
__UpperCamelCase =1.0
__UpperCamelCase =0.0_1
else:
raise ValueError(F"""Unknown model name: {model_name}""" )
__UpperCamelCase =EncodecModel(__UpperCamelCase )
__UpperCamelCase =EncodecFeatureExtractor(
feature_size=config.audio_channels , sampling_rate=config.sampling_rate , chunk_length_s=config.chunk_length_s , overlap=config.overlap , )
feature_extractor.save_pretrained(__UpperCamelCase )
__UpperCamelCase =torch.load(__UpperCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
__UpperCamelCase =original_checkpoint['''best_state''']
recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
model.save_pretrained(__UpperCamelCase )
if repo_id:
print('''Pushing to the hub...''' )
feature_extractor.push_to_hub(__UpperCamelCase )
model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--model''',
default='''encodec_24khz''',
type=str,
help='''The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__lowercase = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 296 | """simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class _lowercase :
"""simple docstring"""
def __init__( self : Tuple , UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
__UpperCamelCase =str(id_ )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =[]
__UpperCamelCase ={} # {vertex:distance}
def __lt__( self : Dict , UpperCamelCase__ : Optional[Any] ) -> Tuple:
'''simple docstring'''
return self.key < other.key
def __repr__( self : int ) -> Tuple:
'''simple docstring'''
return self.id
def UpperCAmelCase_ ( self : List[Any] , UpperCamelCase__ : int ) -> Optional[Any]:
'''simple docstring'''
self.neighbors.append(UpperCamelCase__ )
def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ) -> Tuple:
'''simple docstring'''
__UpperCamelCase =weight
def lowerCAmelCase (__UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : int ):
"""simple docstring"""
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __UpperCamelCase )
graph[b - 1].add_edge(graph[a - 1] , __UpperCamelCase )
def lowerCAmelCase (__UpperCamelCase : list , __UpperCamelCase : Vertex ):
"""simple docstring"""
__UpperCamelCase =[]
for u in graph:
__UpperCamelCase =math.inf
__UpperCamelCase =None
__UpperCamelCase =0
__UpperCamelCase =graph[:]
while q:
__UpperCamelCase =min(__UpperCamelCase )
q.remove(__UpperCamelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__UpperCamelCase =u
__UpperCamelCase =u.edges[v.id]
for i in range(1 , len(__UpperCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowerCAmelCase (__UpperCamelCase : list , __UpperCamelCase : Vertex ):
"""simple docstring"""
for u in graph:
__UpperCamelCase =math.inf
__UpperCamelCase =None
__UpperCamelCase =0
__UpperCamelCase =list(__UpperCamelCase )
hq.heapify(__UpperCamelCase )
while h:
__UpperCamelCase =hq.heappop(__UpperCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__UpperCamelCase =u
__UpperCamelCase =u.edges[v.id]
hq.heapify(__UpperCamelCase )
for i in range(1 , len(__UpperCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def lowerCAmelCase ():
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 296 | 1 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class snake_case ( __snake_case ):
"""simple docstring"""
__lowerCAmelCase = ["""image_processor""", """tokenizer"""]
__lowerCAmelCase = """OwlViTImageProcessor"""
__lowerCAmelCase = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , **lowerCAmelCase_ ):
__lowercase = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCAmelCase_ , )
__lowercase = kwargs.pop("feature_extractor" )
__lowercase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
def __call__( self , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_="max_length" , lowerCAmelCase_="np" , **lowerCAmelCase_ ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) or (isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not isinstance(text[0] , lowerCAmelCase_ )):
__lowercase = [self.tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )]
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and isinstance(text[0] , lowerCAmelCase_ ):
__lowercase = []
# Maximum number of queries across batch
__lowercase = max([len(lowerCAmelCase_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowerCAmelCase_ ) != max_num_queries:
__lowercase = t + [" "] * (max_num_queries - len(lowerCAmelCase_ ))
__lowercase = self.tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
encodings.append(lowerCAmelCase_ )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__lowercase = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__lowercase = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__lowercase = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__lowercase = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__lowercase = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__lowercase = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__lowercase = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__lowercase = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__lowercase = BatchEncoding()
__lowercase = input_ids
__lowercase = attention_mask
if query_images is not None:
__lowercase = BatchEncoding()
__lowercase = self.image_processor(
lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ ).pixel_values
__lowercase = query_pixel_values
if images is not None:
__lowercase = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__lowercase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase_ ) , tensor_type=lowerCAmelCase_ )
def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return self.image_processor.post_process(*lowerCAmelCase_ , **lowerCAmelCase_ )
def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return self.image_processor.post_process_object_detection(*lowerCAmelCase_ , **lowerCAmelCase_ )
def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return self.image_processor.post_process_image_guided_detection(*lowerCAmelCase_ , **lowerCAmelCase_ )
def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def snake_case__ ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ):
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def snake_case__ ( self ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowerCAmelCase_ , )
return self.image_processor_class
@property
def snake_case__ ( self ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowerCAmelCase_ , )
return self.image_processor
| 321 | import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=56 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=2 , lowerCAmelCase_=2 , lowerCAmelCase_=7 , lowerCAmelCase_="gelu_new" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=512 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=4 , lowerCAmelCase_="block_sparse" , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_=2 , lowerCAmelCase_=3 , ):
__lowercase = parent
__lowercase = batch_size
__lowercase = seq_length
__lowercase = is_training
__lowercase = use_attention_mask
__lowercase = use_token_type_ids
__lowercase = use_labels
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = num_choices
__lowercase = rescale_embeddings
__lowercase = attention_type
__lowercase = use_bias
__lowercase = block_size
__lowercase = num_random_blocks
def snake_case__ ( self ):
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_attention_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = BigBirdConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self ):
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_flax
class snake_case ( __snake_case ,unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
__lowerCAmelCase = False
__lowerCAmelCase = False
def snake_case__ ( self ):
__lowercase = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
super().test_hidden_states_output()
@slow
def snake_case__ ( self ):
for model_class_name in self.all_model_classes:
__lowercase = model_class_name.from_pretrained("google/bigbird-roberta-base" )
self.assertIsNotNone(lowerCAmelCase_ )
def snake_case__ ( self ):
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
def snake_case__ ( self ):
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase = model_class(lowerCAmelCase_ )
@jax.jit
def model_jitted(lowerCAmelCase_ , lowerCAmelCase_=None , **lowerCAmelCase_ ):
return model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ )
with self.subTest("JIT Enabled" ):
__lowercase = model_jitted(**lowerCAmelCase_ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = model_jitted(**lowerCAmelCase_ ).to_tuple()
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for jitted_output, output in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-5 , lowerCAmelCase_="outputs" , lowerCAmelCase_=None ):
# `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
# an effort was done to return `attention_probs` (yet to be verified).
if name.startswith("outputs.attentions" ):
return
else:
super().check_pt_flax_outputs(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
| 321 | 1 |
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
debug_launcher(test_script.main )
def _lowerCAmelCase ( self ):
'''simple docstring'''
debug_launcher(test_ops.main )
| 460 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : int = """ylacombe/bark-small"""
a_ : Dict = tempfile.mkdtemp()
a_ : Union[str, Any] = """en_speaker_1"""
a_ : Dict = """This is a test string"""
a_ : Optional[int] = """speaker_embeddings_path.json"""
a_ : int = """speaker_embeddings"""
def _lowerCAmelCase ( self , **lowerCAmelCase_ ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint , **lowerCAmelCase_ )
def _lowerCAmelCase ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Union[str, Any] = self.get_tokenizer()
a_ : Optional[Any] = BarkProcessor(tokenizer=lowerCAmelCase_ )
processor.save_pretrained(self.tmpdirname )
a_ : List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
a_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
a_ : Any = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
a_ : Dict = 35
a_ : List[Any] = 2
a_ : Optional[int] = 8
a_ : int = {
"""semantic_prompt""": np.ones(lowerCAmelCase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
a_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowerCAmelCase_ )
a_ : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
a_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowerCAmelCase_ , **lowerCAmelCase_ )
a_ : Any = processor(text=self.input_string , voice_preset=lowerCAmelCase_ )
a_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowerCAmelCase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
a_ : Any = processor(text=self.input_string , voice_preset=self.voice_preset )
def _lowerCAmelCase ( self ):
'''simple docstring'''
a_ : Tuple = self.get_tokenizer()
a_ : Union[str, Any] = BarkProcessor(tokenizer=lowerCAmelCase_ )
a_ : Optional[int] = processor(text=self.input_string )
a_ : Optional[int] = tokenizer(
self.input_string , padding="""max_length""" , max_length=2_56 , add_special_tokens=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 460 | 1 |
"""simple docstring"""
UpperCamelCase = """Tobias Carryer"""
from time import time
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=int(time() ) ) -> Any: # noqa: B008
A__ = multiplier
A__ = increment
A__ = modulo
A__ = seed
def snake_case__ ( self ) -> Any:
A__ = (self.multiplier * self.seed + self.increment) % self.modulo
return self.seed
if __name__ == "__main__":
# Show the LCG in action.
UpperCamelCase = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31)
while True:
print(lcg.next_number())
| 104 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __magic_name__ ( __lowerCAmelCase : Any ) -> Optional[Any]:
__lowerCamelCase = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def __magic_name__ ( __lowerCAmelCase : List[str] ) -> List[str]:
__lowerCamelCase , __lowerCamelCase = emb.weight.shape
__lowerCamelCase = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
__lowerCamelCase = emb.weight.data
return lin_layer
def __magic_name__ ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : int=None ) -> Optional[int]:
__lowerCamelCase = {}
for old_key in state_dict.keys():
__lowerCamelCase = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__lowerCamelCase = key.replace('''moe_layer.experts.0''' , f'''ffn.experts.expert_{expert_idx}''' )
else:
__lowerCamelCase = key.replace('''moe_layer.experts.''' , '''ffn.experts.expert_''' )
if "gate" in key:
__lowerCamelCase = key.replace('''.moe_layer.gate.wg''' , '''.ffn.router.classifier''' )
if "fc2" and "experts" not in key:
__lowerCamelCase = key.replace('''.fc2.''' , '''.ffn.fc2.''' )
if "fc1" and "experts" not in key:
__lowerCamelCase = key.replace('''.fc1.''' , '''.ffn.fc1.''' )
if ".encoder_attn." in key:
__lowerCamelCase = key.replace('''.encoder_attn.''' , '''.cross_attention.''' )
if "encoder_attn_layer_norm" in key:
__lowerCamelCase = key.replace('''encoder_attn_layer_norm''' , '''cross_attention_layer_norm''' )
if "final_layer_norm" in key:
__lowerCamelCase = key.replace('''final_layer_norm''' , '''ff_layer_norm''' )
__lowerCamelCase = state_dict[old_key]
return new_dict
def __magic_name__ ( __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Any , __lowerCAmelCase : str = WEIGHTS_NAME ) -> Dict:
__lowerCamelCase = []
__lowerCamelCase = 0
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
for expert in range(__lowerCAmelCase ):
__lowerCamelCase = switch_checkpoint_path + f'''-rank-{expert}.pt'''
if os.path.isfile(__lowerCAmelCase ):
__lowerCamelCase = torch.load(__lowerCAmelCase )['''model''']
remove_ignore_keys_(__lowerCAmelCase )
__lowerCamelCase = rename_fairseq_keys(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = os.path.join(
__lowerCAmelCase , weights_name.replace('''.bin''' , f'''-{len(__lowerCAmelCase )+1:05d}-of-???.bin''' ) )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(__lowerCAmelCase )[0]].dtype )
# Add the last block
__lowerCamelCase = os.path.join(__lowerCAmelCase , weights_name.replace('''.bin''' , f'''-{len(__lowerCAmelCase )+1:05d}-of-???.bin''' ) )
__lowerCamelCase = torch.load(switch_checkpoint_path + '''-shared.pt''' )['''model''']
remove_ignore_keys_(__lowerCAmelCase )
__lowerCamelCase = rename_fairseq_keys(__lowerCAmelCase , __lowerCAmelCase )
__lowerCamelCase = shared_weights['''decoder.embed_tokens.weight''']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(__lowerCAmelCase ) == 1:
__lowerCamelCase = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(__lowerCAmelCase , __lowerCAmelCase )
# Otherwise, let's build the index
__lowerCamelCase = {}
for idx, shard in enumerate(__lowerCAmelCase ):
__lowerCamelCase = weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-{len(__lowerCAmelCase ):05d}.bin''' )
__lowerCamelCase = os.path.join(__lowerCAmelCase , weights_name.replace('''.bin''' , f'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
for key in shard:
__lowerCamelCase = shard_file
# Add the metadata
__lowerCamelCase = {'''total_size''': total_size}
__lowerCamelCase = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , '''w''' , encoding='''utf-8''' ) as f:
__lowerCamelCase = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + '''\n'''
f.write(__lowerCAmelCase )
return metadata, index
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
SCREAMING_SNAKE_CASE__ : str = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
SCREAMING_SNAKE_CASE__ : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 298 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase = {
"""configuration_altclip""": [
"""ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AltCLIPConfig""",
"""AltCLIPTextConfig""",
"""AltCLIPVisionConfig""",
],
"""processing_altclip""": ["""AltCLIPProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AltCLIPPreTrainedModel""",
"""AltCLIPModel""",
"""AltCLIPTextModel""",
"""AltCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 351 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
'''simple docstring'''
@staticmethod
def _a ( *a_ , **a_ ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __snake_case ( unittest.TestCase):
'''simple docstring'''
UpperCamelCase__ : Dict = MODEL_FOR_OBJECT_DETECTION_MAPPING
def _a ( self , a_ , a_ , a_ ):
a__ = ObjectDetectionPipeline(model=a_ , image_processor=a_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def _a ( self , a_ , a_ ):
a__ = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0 )
self.assertGreater(len(a_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
a_ , {
"""score""": ANY(a_ ),
"""label""": ANY(a_ ),
"""box""": {"""xmin""": ANY(a_ ), """ymin""": ANY(a_ ), """xmax""": ANY(a_ ), """ymax""": ANY(a_ )},
} , )
import datasets
a__ = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
a__ = [
Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
# RGBA
dataset[0]["""file"""],
# LA
dataset[1]["""file"""],
# L
dataset[2]["""file"""],
]
a__ = object_detector(a_ , threshold=0.0 )
self.assertEqual(len(a_ ) , len(a_ ) )
for outputs in batch_outputs:
self.assertGreater(len(a_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
a_ , {
"""score""": ANY(a_ ),
"""label""": ANY(a_ ),
"""box""": {"""xmin""": ANY(a_ ), """ymin""": ANY(a_ ), """xmax""": ANY(a_ ), """ymax""": ANY(a_ )},
} , )
@require_tf
@unittest.skip("""Object detection not implemented in TF""" )
def _a ( self ):
pass
@require_torch
def _a ( self ):
a__ = """hf-internal-testing/tiny-detr-mobilenetsv3"""
a__ = AutoModelForObjectDetection.from_pretrained(a_ )
a__ = AutoFeatureExtractor.from_pretrained(a_ )
a__ = ObjectDetectionPipeline(model=a_ , feature_extractor=a_ )
a__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0 )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
] , )
a__ = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
[
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
{"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 159, """ymin""": 120, """xmax""": 480, """ymax""": 359}},
],
] , )
@require_torch
@slow
def _a ( self ):
a__ = """facebook/detr-resnet-50"""
a__ = AutoModelForObjectDetection.from_pretrained(a_ )
a__ = AutoFeatureExtractor.from_pretrained(a_ )
a__ = ObjectDetectionPipeline(model=a_ , feature_extractor=a_ )
a__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
a__ = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def _a ( self ):
a__ = """facebook/detr-resnet-50"""
a__ = pipeline("""object-detection""" , model=a_ )
a__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
a__ = object_detector(
[
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
"""http://images.cocodataset.org/val2017/000000039769.jpg""",
] )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
[
{"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 40, """ymin""": 70, """xmax""": 175, """ymax""": 117}},
{"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 333, """ymin""": 72, """xmax""": 368, """ymax""": 187}},
{"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 639, """ymax""": 473}},
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
],
] , )
@require_torch
@slow
def _a ( self ):
a__ = 0.9_985
a__ = """facebook/detr-resnet-50"""
a__ = pipeline("""object-detection""" , model=a_ )
a__ = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=a_ )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 13, """ymin""": 52, """xmax""": 314, """ymax""": 470}},
{"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 345, """ymin""": 23, """xmax""": 640, """ymax""": 368}},
] , )
@require_torch
@require_pytesseract
@slow
def _a ( self ):
a__ = """Narsil/layoutlmv3-finetuned-funsd"""
a__ = 0.9_993
a__ = pipeline("""object-detection""" , model=a_ , threshold=a_ )
a__ = object_detector(
"""https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""" )
self.assertEqual(
nested_simplify(a_ , decimals=4 ) , [
{"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
{"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 294, """ymin""": 254, """xmax""": 343, """ymax""": 264}},
] , )
| 351 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase: List[Any] = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: str = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: Dict = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
_lowercase: Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 192 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase: Dict = logging.get_logger(__name__)
def _lowerCamelCase ( snake_case ):
_lowerCAmelCase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
_lowerCAmelCase = [144, 192, 240]
_lowerCAmelCase = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
_lowerCAmelCase = [96, 120, 144]
_lowerCAmelCase = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
_lowerCAmelCase = [64, 80, 96]
_lowerCAmelCase = [16, 16, 24, 48, 64, 80, 320]
_lowerCAmelCase = 0.05
_lowerCAmelCase = 2.0
if mobilevit_name.startswith('deeplabv3_' ):
_lowerCAmelCase = 512
_lowerCAmelCase = 16
_lowerCAmelCase = 21
_lowerCAmelCase = 'pascal-voc-id2label.json'
else:
_lowerCAmelCase = 1_000
_lowerCAmelCase = 'imagenet-1k-id2label.json'
_lowerCAmelCase = 'huggingface/label-files'
_lowerCAmelCase = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
_lowerCAmelCase = {int(snake_case ): v for k, v in idalabel.items()}
_lowerCAmelCase = idalabel
_lowerCAmelCase = {v: k for k, v in idalabel.items()}
return config
def _lowerCamelCase ( snake_case , snake_case=False ):
for i in range(1 , 6 ):
if F'layer_{i}.' in name:
_lowerCAmelCase = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.' )
if "conv_1." in name:
_lowerCAmelCase = name.replace('conv_1.' , 'conv_stem.' )
if ".block." in name:
_lowerCAmelCase = name.replace('.block.' , '.' )
if "exp_1x1" in name:
_lowerCAmelCase = name.replace('exp_1x1' , 'expand_1x1' )
if "red_1x1" in name:
_lowerCAmelCase = name.replace('red_1x1' , 'reduce_1x1' )
if ".local_rep.conv_3x3." in name:
_lowerCAmelCase = name.replace('.local_rep.conv_3x3.' , '.conv_kxk.' )
if ".local_rep.conv_1x1." in name:
_lowerCAmelCase = name.replace('.local_rep.conv_1x1.' , '.conv_1x1.' )
if ".norm." in name:
_lowerCAmelCase = name.replace('.norm.' , '.normalization.' )
if ".conv." in name:
_lowerCAmelCase = name.replace('.conv.' , '.convolution.' )
if ".conv_proj." in name:
_lowerCAmelCase = name.replace('.conv_proj.' , '.conv_projection.' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
_lowerCAmelCase = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'.{i}.{j}.' in name:
_lowerCAmelCase = name.replace(F'.{i}.{j}.' , F'.{i}.' )
if "expand_1x1" in name:
_lowerCAmelCase = name.replace('expand_1x1' , 'downsampling_layer.expand_1x1' )
if "conv_3x3" in name:
_lowerCAmelCase = name.replace('conv_3x3' , 'downsampling_layer.conv_3x3' )
if "reduce_1x1" in name:
_lowerCAmelCase = name.replace('reduce_1x1' , 'downsampling_layer.reduce_1x1' )
for i in range(2 , 5 ):
if F'.global_rep.{i}.weight' in name:
_lowerCAmelCase = name.replace(F'.global_rep.{i}.weight' , '.layernorm.weight' )
if F'.global_rep.{i}.bias' in name:
_lowerCAmelCase = name.replace(F'.global_rep.{i}.bias' , '.layernorm.bias' )
if ".global_rep." in name:
_lowerCAmelCase = name.replace('.global_rep.' , '.transformer.' )
if ".pre_norm_mha.0." in name:
_lowerCAmelCase = name.replace('.pre_norm_mha.0.' , '.layernorm_before.' )
if ".pre_norm_mha.1.out_proj." in name:
_lowerCAmelCase = name.replace('.pre_norm_mha.1.out_proj.' , '.attention.output.dense.' )
if ".pre_norm_ffn.0." in name:
_lowerCAmelCase = name.replace('.pre_norm_ffn.0.' , '.layernorm_after.' )
if ".pre_norm_ffn.1." in name:
_lowerCAmelCase = name.replace('.pre_norm_ffn.1.' , '.intermediate.dense.' )
if ".pre_norm_ffn.4." in name:
_lowerCAmelCase = name.replace('.pre_norm_ffn.4.' , '.output.dense.' )
if ".transformer." in name:
_lowerCAmelCase = name.replace('.transformer.' , '.transformer.layer.' )
if ".aspp_layer." in name:
_lowerCAmelCase = name.replace('.aspp_layer.' , '.' )
if ".aspp_pool." in name:
_lowerCAmelCase = name.replace('.aspp_pool.' , '.' )
if "seg_head." in name:
_lowerCAmelCase = name.replace('seg_head.' , 'segmentation_head.' )
if "segmentation_head.classifier.classifier." in name:
_lowerCAmelCase = name.replace('segmentation_head.classifier.classifier.' , 'segmentation_head.classifier.' )
if "classifier.fc." in name:
_lowerCAmelCase = name.replace('classifier.fc.' , 'classifier.' )
elif (not base_model) and ("segmentation_head." not in name):
_lowerCAmelCase = 'mobilevit.' + name
return name
def _lowerCamelCase ( snake_case , snake_case , snake_case=False ):
if base_model:
_lowerCAmelCase = ''
else:
_lowerCAmelCase = 'mobilevit.'
for key in orig_state_dict.copy().keys():
_lowerCAmelCase = orig_state_dict.pop(snake_case )
if key[:8] == "encoder.":
_lowerCAmelCase = key[8:]
if "qkv" in key:
_lowerCAmelCase = key.split('.' )
_lowerCAmelCase = int(key_split[0][6:] ) - 1
_lowerCAmelCase = int(key_split[3] )
_lowerCAmelCase = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}' )
_lowerCAmelCase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
_lowerCAmelCase = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
_lowerCAmelCase = val[:dim, :]
_lowerCAmelCase = val[dim : dim * 2, :]
_lowerCAmelCase = val[-dim:, :]
else:
_lowerCAmelCase = val[:dim]
_lowerCAmelCase = val[dim : dim * 2]
_lowerCAmelCase = val[-dim:]
else:
_lowerCAmelCase = val
return orig_state_dict
def _lowerCamelCase ( ):
_lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def _lowerCamelCase ( snake_case , snake_case , snake_case , snake_case=False ):
_lowerCAmelCase = get_mobilevit_config(snake_case )
# load original state_dict
_lowerCAmelCase = torch.load(snake_case , map_location='cpu' )
# load 🤗 model
if mobilevit_name.startswith('deeplabv3_' ):
_lowerCAmelCase = MobileViTForSemanticSegmentation(snake_case ).eval()
else:
_lowerCAmelCase = MobileViTForImageClassification(snake_case ).eval()
_lowerCAmelCase = convert_state_dict(snake_case , snake_case )
model.load_state_dict(snake_case )
# Check outputs on an image, prepared by MobileViTImageProcessor
_lowerCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
_lowerCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' )
_lowerCAmelCase = model(**snake_case )
_lowerCAmelCase = outputs.logits
if mobilevit_name.startswith('deeplabv3_' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
_lowerCAmelCase = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
_lowerCAmelCase = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
_lowerCAmelCase = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3, :3, :3] , snake_case , atol=1E-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
_lowerCAmelCase = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
_lowerCAmelCase = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
_lowerCAmelCase = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}' )
assert torch.allclose(logits[0, :3] , snake_case , atol=1E-4 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case )
if push_to_hub:
_lowerCAmelCase = {
'mobilevit_s': 'mobilevit-small',
'mobilevit_xs': 'mobilevit-x-small',
'mobilevit_xxs': 'mobilevit-xx-small',
'deeplabv3_mobilevit_s': 'deeplabv3-mobilevit-small',
'deeplabv3_mobilevit_xs': 'deeplabv3-mobilevit-x-small',
'deeplabv3_mobilevit_xxs': 'deeplabv3-mobilevit-xx-small',
}
print('Pushing to the hub...' )
_lowerCAmelCase = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case , organization='apple' )
model.push_to_hub(snake_case , organization='apple' )
if __name__ == "__main__":
_lowercase: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_lowercase: List[str] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 192 | 1 |
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any=13 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=24 , __UpperCamelCase : int=16 , __UpperCamelCase : Tuple=True , __UpperCamelCase : int=True , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Optional[int]=5 , __UpperCamelCase : Any=4 , __UpperCamelCase : Tuple=37 , __UpperCamelCase : List[Any]="gelu" , __UpperCamelCase : str=0.1 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Optional[int]=10 , __UpperCamelCase : int=0.02 , __UpperCamelCase : str=None , __UpperCamelCase : int=2 , __UpperCamelCase : int=2 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = max_length
_UpperCAmelCase = num_mel_bins
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
_UpperCAmelCase = frequency_stride
_UpperCAmelCase = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
_UpperCAmelCase = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
_UpperCAmelCase = (self.max_length - self.patch_size) // self.time_stride + 1
_UpperCAmelCase = frequency_out_dimension * time_out_dimension
_UpperCAmelCase = num_patches + 2
def UpperCAmelCase__ ( self : Tuple ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, input_values, labels
def UpperCAmelCase__ ( self : Union[str, Any] ):
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def UpperCAmelCase__ ( self : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int ):
_UpperCAmelCase = ASTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Optional[Any] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"input_values": input_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowercase , lowercase , unittest.TestCase):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE : Tuple = (
{"""audio-classification""": ASTForAudioClassification, """feature-extraction""": ASTModel}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : int = False
def UpperCAmelCase__ ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] ):
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase = ASTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def UpperCAmelCase__ ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def UpperCAmelCase__ ( self : Union[str, Any] ):
pass
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def UpperCAmelCase__ ( self : List[str] ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["input_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
@slow
def UpperCAmelCase__ ( self : str ):
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = ASTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def __lowerCamelCase ( ) -> int:
_UpperCAmelCase = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
_UpperCAmelCase , _UpperCAmelCase = torchaudio.load(_lowerCAmelCase )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
@cached_property
def UpperCAmelCase__ ( self : Tuple ):
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Dict ):
_UpperCAmelCase = self.default_feature_extractor
_UpperCAmelCase = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(__UpperCamelCase )
_UpperCAmelCase = self.default_feature_extractor
_UpperCAmelCase , _UpperCAmelCase = prepare_audio()
_UpperCAmelCase = audio.squeeze().numpy()
_UpperCAmelCase = feature_extractor(__UpperCamelCase , sampling_rate=__UpperCamelCase , return_tensors="pt" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 129 |
import warnings
warnings.warn(
"memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: "
"`from accelerate import find_executable_batch_size` to avoid this warning.",
FutureWarning,
)
| 129 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_a = logging.get_logger(__name__)
_a = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = 'detr'
lowercase__ = ['past_key_values']
lowercase__ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , __a=True , __a=None , __a=3 , __a=1_00 , __a=6 , __a=20_48 , __a=8 , __a=6 , __a=20_48 , __a=8 , __a=0.0 , __a=0.0 , __a=True , __a="relu" , __a=2_56 , __a=0.1 , __a=0.0 , __a=0.0 , __a=0.02 , __a=1.0 , __a=False , __a="sine" , __a="resnet50" , __a=True , __a=False , __a=1 , __a=5 , __a=2 , __a=1 , __a=1 , __a=5 , __a=2 , __a=0.1 , **__a , ) -> int:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''')
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''')
_UpperCamelCase = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''])
elif isinstance(__a , __a):
_UpperCamelCase = backbone_config.get('''model_type''')
_UpperCamelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCamelCase = config_class.from_dict(__a)
# set timm attributes to None
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None, None, None
_UpperCamelCase = use_timm_backbone
_UpperCamelCase = backbone_config
_UpperCamelCase = num_channels
_UpperCamelCase = num_queries
_UpperCamelCase = d_model
_UpperCamelCase = encoder_ffn_dim
_UpperCamelCase = encoder_layers
_UpperCamelCase = encoder_attention_heads
_UpperCamelCase = decoder_ffn_dim
_UpperCamelCase = decoder_layers
_UpperCamelCase = decoder_attention_heads
_UpperCamelCase = dropout
_UpperCamelCase = attention_dropout
_UpperCamelCase = activation_dropout
_UpperCamelCase = activation_function
_UpperCamelCase = init_std
_UpperCamelCase = init_xavier_std
_UpperCamelCase = encoder_layerdrop
_UpperCamelCase = decoder_layerdrop
_UpperCamelCase = encoder_layers
_UpperCamelCase = auxiliary_loss
_UpperCamelCase = position_embedding_type
_UpperCamelCase = backbone
_UpperCamelCase = use_pretrained_backbone
_UpperCamelCase = dilation
# Hungarian matcher
_UpperCamelCase = class_cost
_UpperCamelCase = bbox_cost
_UpperCamelCase = giou_cost
# Loss coefficients
_UpperCamelCase = mask_loss_coefficient
_UpperCamelCase = dice_loss_coefficient
_UpperCamelCase = bbox_loss_coefficient
_UpperCamelCase = giou_loss_coefficient
_UpperCamelCase = eos_coefficient
super().__init__(is_encoder_decoder=__a , **__a)
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.d_model
@classmethod
def UpperCAmelCase ( cls , __a , **__a) -> int:
'''simple docstring'''
return cls(backbone_config=__a , **__a)
def UpperCAmelCase ( self) -> Dict[str, any]:
'''simple docstring'''
_UpperCamelCase = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
_UpperCamelCase = self.backbone_config.to_dict()
_UpperCamelCase = self.__class__.model_type
return output
class _UpperCAmelCase( lowerCamelCase ):
lowercase__ = version.parse('1.11' )
@property
def UpperCAmelCase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
])
@property
def UpperCAmelCase ( self) -> float:
'''simple docstring'''
return 1e-5
@property
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return 12
| 19 |
def UpperCAmelCase__ ( lowerCamelCase_ : list[int] , lowerCamelCase_ : list[int] ):
# Check if the input is valid
if not len(lowerCamelCase_ ) == len(lowerCamelCase_ ) == 3:
raise ValueError('Please enter a valid equation.' )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError('Both a & b of two equations can\'t be zero.' )
# Extract the coefficients
__a , __a , __a : Optional[Any] = equationa
__a , __a , __a : Optional[int] = equationa
# Calculate the determinants of the matrices
__a : str = aa * ba - aa * ba
__a : Tuple = ca * ba - ca * ba
__a : Union[str, Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError('Infinite solutions. (Consistent system)' )
else:
raise ValueError('No solution. (Inconsistent system)' )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
__a : Any = determinant_x / determinant
__a : Optional[Any] = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 47 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=7 , _a=3 , _a=18 , _a=30 , _a=400 , _a=True , _a=None , _a=True , _a=False , _a=True , _a=True , _a=[0.5, 0.5, 0.5] , _a=[0.5, 0.5, 0.5] , ):
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size if size is not None else {'''height''': 18, '''width''': 20}
__a = do_thumbnail
__a = do_align_axis
__a = do_pad
__a = do_normalize
__a = image_mean
__a = image_std
def __UpperCAmelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = DonutImageProcessor if is_vision_available() else None
def __UpperCAmelCase ( self ):
__a = DonutImageProcessingTester(self )
@property
def __UpperCAmelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __UpperCAmelCase ( self ):
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , '''do_resize''' ) )
self.assertTrue(hasattr(_a , '''size''' ) )
self.assertTrue(hasattr(_a , '''do_thumbnail''' ) )
self.assertTrue(hasattr(_a , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(_a , '''do_pad''' ) )
self.assertTrue(hasattr(_a , '''do_normalize''' ) )
self.assertTrue(hasattr(_a , '''image_mean''' ) )
self.assertTrue(hasattr(_a , '''image_std''' ) )
def __UpperCAmelCase ( self ):
__a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
__a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
__a = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def __UpperCAmelCase ( self ):
pass
@is_flaky()
def __UpperCAmelCase ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a = image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def __UpperCAmelCase ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a = image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def __UpperCAmelCase ( self ):
# Initialize image_processing
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__a = image_processing(_a , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 65 |
"""simple docstring"""
import warnings
from ..trainer import Trainer
from ..utils import logging
lowercase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a=None , **_a ):
warnings.warn(
'''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
'''instead.''' , _a , )
super().__init__(args=_a , **_a )
| 65 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__SCREAMING_SNAKE_CASE =TypeVar("T")
__SCREAMING_SNAKE_CASE =TypeVar("U")
class UpperCamelCase ( Generic[T, U] ):
def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ : Optional[Any] = key
lowercase_ : Optional[int] = val
lowercase_ : DoubleLinkedListNode[T, U] | None = None
lowercase_ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self ) -> str:
'''simple docstring'''
return (
f'''Node: key: {self.key}, val: {self.val}, '''
f'''has next: {bool(self.next )}, has prev: {bool(self.prev )}'''
)
class UpperCamelCase ( Generic[T, U] ):
def __init__( self ) -> None:
'''simple docstring'''
lowercase_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowerCAmelCase_ ,lowerCAmelCase_ )
lowercase_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowerCAmelCase_ ,lowerCAmelCase_ )
lowercase_ : Union[str, Any] = self.rear, self.head
def __repr__( self ) -> str:
'''simple docstring'''
lowercase_ : Tuple = ["DoubleLinkedList"]
lowercase_ : List[Any] = self.head
while node.next is not None:
rep.append(str(lowerCAmelCase_ ) )
lowercase_ : Tuple = node.next
rep.append(str(self.rear ) )
return ",\n ".join(lowerCAmelCase_ )
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None:
'''simple docstring'''
lowercase_ : Dict = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
lowercase_ : List[Any] = node
lowercase_ : Any = previous
lowercase_ : List[Any] = node
lowercase_ : Dict = self.rear
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> DoubleLinkedListNode[T, U] | None:
'''simple docstring'''
if node.prev is None or node.next is None:
return None
lowercase_ : List[Any] = node.next
lowercase_ : Optional[Any] = node.prev
lowercase_ : List[str] = None
lowercase_ : str = None
return node
class UpperCamelCase ( Generic[T, U] ):
lowercase = {}
def __init__( self ,__UpperCamelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ : DoubleLinkedList[T, U] = DoubleLinkedList()
lowercase_ : List[str] = capacity
lowercase_ : Tuple = 0
lowercase_ : List[str] = 0
lowercase_ : List[str] = 0
lowercase_ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self ) -> str:
'''simple docstring'''
return (
f'''CacheInfo(hits={self.hits}, misses={self.miss}, '''
f'''capacity={self.capacity}, current size={self.num_keys})'''
)
def __contains__( self ,__UpperCamelCase ) -> bool:
'''simple docstring'''
return key in self.cache
def _UpperCAmelCase ( self ,__UpperCamelCase ) -> U | None:
'''simple docstring'''
if key in self.cache:
self.hits += 1
lowercase_ : DoubleLinkedListNode[T, U] = self.cache[key]
lowercase_ : int = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowerCAmelCase_ )
return node.val
self.miss += 1
return None
def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> None:
'''simple docstring'''
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
lowercase_ : str = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowerCAmelCase_ ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
lowercase_ : List[Any] = DoubleLinkedListNode(lowerCAmelCase_ ,lowerCAmelCase_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
lowercase_ : Optional[Any] = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
lowercase_ : str = value
self.list.add(lowerCAmelCase_ )
@classmethod
def _UpperCAmelCase ( cls ,__UpperCamelCase = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
'''simple docstring'''
def cache_decorator_inner(__UpperCamelCase ) -> Callable[..., U]:
def cache_decorator_wrapper(*__UpperCamelCase ) -> U:
if func not in cls.decorator_function_to_instance_map:
lowercase_ : Any = LRUCache(lowerCAmelCase_ )
lowercase_ : List[Any] = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
lowercase_ : str = func(*lowerCAmelCase_ )
cls.decorator_function_to_instance_map[func].put(args[0] ,lowerCAmelCase_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowerCAmelCase_ ,'cache_info' ,lowerCAmelCase_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 425 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = torch.device('''cpu''')
def snake_case ( ):
UpperCAmelCase_ : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : str = Image.open(requests.get(A__ ,stream=A__ ).raw )
return im
def snake_case ( A__ ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Tuple = dct.pop(A__ )
UpperCAmelCase_ : Optional[Any] = val
def snake_case ( A__ ):
UpperCAmelCase_ : List[str] = []
for k in state_dict.keys():
UpperCAmelCase_ : Union[str, Any] = k
if ".pwconv" in k:
UpperCAmelCase_ : Dict = k_new.replace(".pwconv" ,".point_wise_conv" )
if ".dwconv" in k:
UpperCAmelCase_ : Any = k_new.replace(".dwconv" ,".depth_wise_conv" )
if ".Proj." in k:
UpperCAmelCase_ : Dict = k_new.replace(".Proj." ,".proj." )
if "patch_embed" in k_new:
UpperCAmelCase_ : Tuple = k_new.replace("patch_embed" ,"swiftformer.patch_embed.patch_embedding" )
if "network" in k_new:
UpperCAmelCase_ : List[Any] = k_new.split("." )
if ls[2].isdigit():
UpperCAmelCase_ : Tuple = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
else:
UpperCAmelCase_ : Optional[Any] = k_new.replace("network" ,"swiftformer.encoder.network" )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def snake_case ( A__ ,A__ ,A__ ):
UpperCAmelCase_ : Optional[int] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
UpperCAmelCase_ : Optional[Any] = 10_00
UpperCAmelCase_ : str = "huggingface/label-files"
UpperCAmelCase_ : str = "imagenet-1k-id2label.json"
UpperCAmelCase_ : List[str] = json.load(open(hf_hub_download(A__ ,A__ ,repo_type="dataset" ) ,"r" ) )
UpperCAmelCase_ : Tuple = {int(A__ ): v for k, v in idalabel.items()}
UpperCAmelCase_ : List[Any] = idalabel
UpperCAmelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
UpperCAmelCase_ : Tuple = [3, 3, 6, 4]
UpperCAmelCase_ : str = [48, 56, 1_12, 2_20]
elif swiftformer_name == "swiftformer_s":
UpperCAmelCase_ : Optional[Any] = [3, 3, 9, 6]
UpperCAmelCase_ : Optional[Any] = [48, 64, 1_68, 2_24]
elif swiftformer_name == "swiftformer_l1":
UpperCAmelCase_ : int = [4, 3, 10, 5]
UpperCAmelCase_ : Union[str, Any] = [48, 96, 1_92, 3_84]
elif swiftformer_name == "swiftformer_l3":
UpperCAmelCase_ : Dict = [4, 4, 12, 6]
UpperCAmelCase_ : Optional[int] = [64, 1_28, 3_20, 5_12]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
UpperCAmelCase_ : List[Any] = torch.hub.load_state_dict_from_url(A__ ,map_location="cpu" ,check_hash=A__ )
else:
UpperCAmelCase_ : Any = torch.load(A__ ,map_location="cpu" )
UpperCAmelCase_ : List[str] = checkpoint
UpperCAmelCase_ : Dict = create_rename_keys(A__ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(A__ ,A__ ,A__ )
# load HuggingFace model
UpperCAmelCase_ : Optional[int] = SwiftFormerForImageClassification(A__ ).eval()
hf_model.load_state_dict(A__ )
# prepare test inputs
UpperCAmelCase_ : Tuple = prepare_img()
UpperCAmelCase_ : int = ViTImageProcessor.from_pretrained("preprocessor_config" )
UpperCAmelCase_ : int = processor(images=A__ ,return_tensors="pt" )
# compare outputs from both models
UpperCAmelCase_ : List[Any] = get_expected_output(A__ )
UpperCAmelCase_ : int = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 10_00] )
assert torch.allclose(hf_logits[0, 0:5] ,A__ ,atol=1e-3 )
Path(A__ ).mkdir(exist_ok=A__ )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(A__ )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swiftformer_name''',
default='''swiftformer_xs''',
choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''],
type=str,
help='''Name of the SwiftFormer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''./converted_outputs/''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''')
lowerCamelCase_ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 95 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__A : Any = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
__A : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 595 | """simple docstring"""
from __future__ import annotations
__A : Union[str, Any] = []
def lowercase ( UpperCamelCase : list[list[int]] , UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
for i in range(len(UpperCamelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(UpperCamelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(UpperCamelCase , -1 , -1 ) , range(UpperCamelCase , len(UpperCamelCase ) ) ):
if board[i][j] == 1:
return False
return True
def lowercase ( UpperCamelCase : list[list[int]] , UpperCamelCase : int ):
"""simple docstring"""
if row >= len(UpperCamelCase ):
solution.append(UpperCamelCase )
printboard(UpperCamelCase )
print()
return True
for i in range(len(UpperCamelCase ) ):
if is_safe(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
A__ : Optional[Any] =1
solve(UpperCamelCase , row + 1 )
A__ : Union[str, Any] =0
return False
def lowercase ( UpperCamelCase : list[list[int]] ):
"""simple docstring"""
for i in range(len(UpperCamelCase ) ):
for j in range(len(UpperCamelCase ) ):
if board[i][j] == 1:
print("Q" , end=" " )
else:
print("." , end=" " )
print()
# n=int(input("The no. of queens"))
__A : List[Any] = 8
__A : Dict = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 595 | 1 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
snake_case = logging.getLogger(__name__)
def snake_case ( lowerCAmelCase_ ) -> List[str]:
_snake_case = git.Repo(search_parent_directories=lowerCAmelCase_ )
_snake_case = {
'''repo_id''': str(lowerCAmelCase_ ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
}
with open(os.path.join(lowerCAmelCase_ , '''git_log.json''' ) , '''w''' ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ , indent=4 )
def snake_case ( lowerCAmelCase_ ) -> int:
if params.n_gpu <= 0:
_snake_case = 0
_snake_case = -1
_snake_case = True
_snake_case = False
return
assert torch.cuda.is_available()
logger.info('''Initializing GPUs''' )
if params.n_gpu > 1:
assert params.local_rank != -1
_snake_case = int(os.environ['''WORLD_SIZE'''] )
_snake_case = int(os.environ['''N_GPU_NODE'''] )
_snake_case = int(os.environ['''RANK'''] )
# number of nodes / node ID
_snake_case = params.world_size // params.n_gpu_per_node
_snake_case = params.global_rank // params.n_gpu_per_node
_snake_case = True
assert params.n_nodes == int(os.environ['''N_NODES'''] )
assert params.node_id == int(os.environ['''NODE_RANK'''] )
# local job (single GPU)
else:
assert params.local_rank == -1
_snake_case = 1
_snake_case = 0
_snake_case = 0
_snake_case = 0
_snake_case = 1
_snake_case = 1
_snake_case = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
_snake_case = params.node_id == 0 and params.local_rank == 0
_snake_case = params.n_nodes > 1
# summary
_snake_case = f"""--- Global rank: {params.global_rank} - """
logger.info(PREFIX + '''Number of nodes: %i''' % params.n_nodes )
logger.info(PREFIX + '''Node ID : %i''' % params.node_id )
logger.info(PREFIX + '''Local rank : %i''' % params.local_rank )
logger.info(PREFIX + '''World size : %i''' % params.world_size )
logger.info(PREFIX + '''GPUs per node : %i''' % params.n_gpu_per_node )
logger.info(PREFIX + '''Master : %s''' % str(params.is_master ) )
logger.info(PREFIX + '''Multi-node : %s''' % str(params.multi_node ) )
logger.info(PREFIX + '''Multi-GPU : %s''' % str(params.multi_gpu ) )
logger.info(PREFIX + '''Hostname : %s''' % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info('''Initializing PyTorch distributed''' )
torch.distributed.init_process_group(
init_method='''env://''' , backend='''nccl''' , )
def snake_case ( lowerCAmelCase_ ) -> Dict:
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 103 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def __lowerCamelCase ( UpperCAmelCase_ : int = 8 ):
"""simple docstring"""
a :Optional[int] = ascii_letters + digits + punctuation
return "".join(secrets.choice(UpperCAmelCase_ ) for _ in range(UpperCAmelCase_ ) )
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ):
"""simple docstring"""
i -= len(UpperCAmelCase_ )
a :Tuple = i // 3
a :int = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
a :Union[str, Any] = (
chars_incl
+ random(UpperCAmelCase_ , quotient + remainder )
+ random(UpperCAmelCase_ , UpperCAmelCase_ )
+ random(UpperCAmelCase_ , UpperCAmelCase_ )
)
a :Dict = list(UpperCAmelCase_ )
shuffle(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
# random is a generalised function for letters, characters and numbers
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int ):
"""simple docstring"""
return "".join(secrets.choice(UpperCAmelCase_ ) for _ in range(UpperCAmelCase_ ) )
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple ):
"""simple docstring"""
pass # Put your code here...
def __lowerCamelCase ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ):
"""simple docstring"""
pass # Put your code here...
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ):
"""simple docstring"""
pass # Put your code here...
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : int = 8 ):
"""simple docstring"""
if len(UpperCAmelCase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
a :Dict = any(char in ascii_uppercase for char in password )
a :Optional[int] = any(char in ascii_lowercase for char in password )
a :Tuple = any(char in digits for char in password )
a :Any = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def __lowerCamelCase ( ):
"""simple docstring"""
a :int = int(input('''Please indicate the max length of your password: ''' ).strip() )
a :Union[str, Any] = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''' , password_generator(UpperCAmelCase_ ) )
print(
'''Alternative Password generated:''' , alternative_password_generator(UpperCAmelCase_ , UpperCAmelCase_ ) , )
print('''[If you are thinking of using this passsword, You better save it.]''' )
if __name__ == "__main__":
main()
| 445 | 0 |
'''simple docstring'''
def _a ( _lowercase : Any ): # noqa: E741
'''simple docstring'''
__UpperCAmelCase : Any = len(lowerCAmelCase__ )
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : Any = [0] * n
__UpperCAmelCase : List[Any] = [False] * n
__UpperCAmelCase : int = [False] * n
def dfs(_lowercase : Any , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[int] ):
if parent == root:
out_edge_count += 1
__UpperCAmelCase : int = True
__UpperCAmelCase : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
__UpperCAmelCase : int = dfs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__UpperCAmelCase : int = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
__UpperCAmelCase : Tuple = True
# AP found via cycle
if at == low[to]:
__UpperCAmelCase : List[str] = True
else:
__UpperCAmelCase : List[Any] = min(low[at] , lowerCAmelCase__ )
return out_edge_count
for i in range(lowerCAmelCase__ ):
if not visited[i]:
__UpperCAmelCase : Union[str, Any] = 0
__UpperCAmelCase : str = dfs(lowerCAmelCase__ , lowerCAmelCase__ , -1 , lowerCAmelCase__ )
__UpperCAmelCase : Dict = out_edge_count > 1
for x in range(len(lowerCAmelCase__ ) ):
if is_art[x] is True:
print(lowerCAmelCase__ )
# Adjacency list of graph
__UpperCAmelCase :Optional[int] = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data) | 706 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase :Optional[Any] = {
"configuration_layoutlmv2": ["LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMv2Config"],
"processing_layoutlmv2": ["LayoutLMv2Processor"],
"tokenization_layoutlmv2": ["LayoutLMv2Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :Any = ["LayoutLMv2TokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :int = ["LayoutLMv2FeatureExtractor"]
__UpperCAmelCase :Optional[int] = ["LayoutLMv2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase :List[Any] = [
"LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"LayoutLMv2ForQuestionAnswering",
"LayoutLMv2ForSequenceClassification",
"LayoutLMv2ForTokenClassification",
"LayoutLMv2Layer",
"LayoutLMv2Model",
"LayoutLMv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase :Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 266 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Tuple = logging.get_logger(__name__)
UpperCAmelCase : int = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = """roc_bert"""
def __init__( self : Optional[Any] , UpperCamelCase : List[Any]=30_522 , UpperCamelCase : Dict=768 , UpperCamelCase : Tuple=12 , UpperCamelCase : Dict=12 , UpperCamelCase : List[str]=3_072 , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : List[Any]=0.02 , UpperCamelCase : List[str]=1e-1_2 , UpperCamelCase : Any=True , UpperCamelCase : int=0 , UpperCamelCase : List[str]="absolute" , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=True , UpperCamelCase : Tuple=True , UpperCamelCase : Any=768 , UpperCamelCase : int=910 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Tuple=24_858 , UpperCamelCase : Optional[int]=True , **UpperCamelCase : List[str] , ):
'''simple docstring'''
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : int = max_position_embeddings
__UpperCAmelCase : List[str] = hidden_size
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : Optional[int] = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : str = hidden_dropout_prob
__UpperCAmelCase : Any = attention_probs_dropout_prob
__UpperCAmelCase : Tuple = initializer_range
__UpperCAmelCase : int = type_vocab_size
__UpperCAmelCase : List[Any] = layer_norm_eps
__UpperCAmelCase : Dict = use_cache
__UpperCAmelCase : str = enable_pronunciation
__UpperCAmelCase : str = enable_shape
__UpperCAmelCase : int = pronunciation_embed_dim
__UpperCAmelCase : int = pronunciation_vocab_size
__UpperCAmelCase : List[Any] = shape_embed_dim
__UpperCAmelCase : Union[str, Any] = shape_vocab_size
__UpperCAmelCase : int = concat_input
__UpperCAmelCase : Optional[int] = position_embedding_type
__UpperCAmelCase : Dict = classifier_dropout
super().__init__(pad_token_id=UpperCamelCase , **UpperCamelCase )
| 139 |
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : int = 1_0_0_0_0_0_0 ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = 1
__UpperCAmelCase : Union[str, Any] = 1
__UpperCAmelCase : Optional[Any] = {1: 1}
for inputa in range(2 , _UpperCamelCase ):
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : str = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__UpperCAmelCase : Tuple = (3 * number) + 1
counter += 1
if inputa not in counters:
__UpperCAmelCase : Optional[Any] = counter
if counter > pre_counter:
__UpperCAmelCase : List[Any] = inputa
__UpperCAmelCase : List[Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 139 | 1 |
'''simple docstring'''
import os
from pathlib import Path
def _UpperCamelCase ( ):
"""simple docstring"""
from torch.utils.cpp_extension import load
__UpperCamelCase : Optional[int] = Path(_a ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
__UpperCamelCase : Optional[Any] = [
root / filename
for filename in [
'vision.cpp',
os.path.join('cpu' , 'ms_deform_attn_cpu.cpp' ),
os.path.join('cuda' , 'ms_deform_attn_cuda.cu' ),
]
]
load(
'MultiScaleDeformableAttention' , _a , with_cuda=_a , extra_include_paths=[str(_a )] , extra_cflags=['-DWITH_CUDA=1'] , extra_cuda_cflags=[
'-DCUDA_HAS_FP16=1',
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 720 | '''simple docstring'''
a= '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 287 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase__ : Dict = logging.get_logger(__name__)
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = b.T
SCREAMING_SNAKE_CASE_ = np.sum(np.square(__UpperCAmelCase ) , axis=1 )
SCREAMING_SNAKE_CASE_ = np.sum(np.square(__UpperCAmelCase ) , axis=0 )
SCREAMING_SNAKE_CASE_ = np.matmul(__UpperCAmelCase , __UpperCAmelCase )
SCREAMING_SNAKE_CASE_ = aa[:, None] - 2 * ab + ba[None, :]
return d
def UpperCAmelCase_ ( __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ = x.reshape(-1 , 3 )
SCREAMING_SNAKE_CASE_ = squared_euclidean_distance(__UpperCAmelCase , __UpperCAmelCase )
return np.argmin(__UpperCAmelCase , axis=1 )
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = ["pixel_values"]
def __init__( self : List[Any] , _lowerCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , _lowerCAmelCase : bool = True , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = True , **_lowerCAmelCase : List[Any] , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = size if size is not None else {'height': 256, 'width': 256}
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = np.array(_lowerCAmelCase ) if clusters is not None else None
SCREAMING_SNAKE_CASE_ = do_resize
SCREAMING_SNAKE_CASE_ = size
SCREAMING_SNAKE_CASE_ = resample
SCREAMING_SNAKE_CASE_ = do_normalize
SCREAMING_SNAKE_CASE_ = do_color_quantize
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Dict[str, int] , _lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCAmelCase : Tuple , ):
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}" )
return resize(
_lowerCAmelCase , size=(size['height'], size['width']) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : np.ndarray , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , ):
SCREAMING_SNAKE_CASE_ = rescale(image=_lowerCAmelCase , scale=1 / 127.5 , data_format=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = image - 1
return image
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : ImageInput , _lowerCAmelCase : bool = None , _lowerCAmelCase : Dict[str, int] = None , _lowerCAmelCase : PILImageResampling = None , _lowerCAmelCase : bool = None , _lowerCAmelCase : Optional[bool] = None , _lowerCAmelCase : Optional[Union[List[List[int]], np.ndarray]] = None , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **_lowerCAmelCase : Optional[Any] , ):
SCREAMING_SNAKE_CASE_ = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ = get_size_dict(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
SCREAMING_SNAKE_CASE_ = clusters if clusters is not None else self.clusters
SCREAMING_SNAKE_CASE_ = np.array(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = make_list_of_images(_lowerCAmelCase )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_color_quantize and clusters is None:
raise ValueError('Clusters must be specified if do_color_quantize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ = [to_numpy_array(_lowerCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ = [self.resize(image=_lowerCAmelCase , size=_lowerCAmelCase , resample=_lowerCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ = [self.normalize(image=_lowerCAmelCase ) for image in images]
if do_color_quantize:
SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_lowerCAmelCase , ChannelDimension.LAST ) for image in images]
# color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
SCREAMING_SNAKE_CASE_ = np.array(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = color_quantize(_lowerCAmelCase , _lowerCAmelCase ).reshape(images.shape[:-1] )
# flatten to (batch_size, height*width)
SCREAMING_SNAKE_CASE_ = images.shape[0]
SCREAMING_SNAKE_CASE_ = images.reshape(_lowerCAmelCase , -1 )
# We need to convert back to a list of images to keep consistent behaviour across processors.
SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase )
else:
SCREAMING_SNAKE_CASE_ = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ = {'input_ids': images}
return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase ) | 31 |
def _A ( SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
a__ : List[str] =len(SCREAMING_SNAKE_CASE )
a__ : Optional[int] =[[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
# for each arr value, a sum of zero(0) can be formed by not taking any element
# hence True/1
for i in range(arr_len + 1 ):
a__ : Optional[int] =True
# sum is not zero and set is empty then false
for i in range(1 , required_sum + 1 ):
a__ : str =False
for i in range(1 , arr_len + 1 ):
for j in range(1 , required_sum + 1 ):
if arr[i - 1] > j:
a__ : str =subset[i - 1][j]
if arr[i - 1] <= j:
a__ : Tuple =subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 563 | 0 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class __a :
def __init__( self : List[str] ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[int]=2 ,lowerCamelCase : List[str]=True ,lowerCamelCase : List[Any]=False ,lowerCamelCase : List[Any]=10 ,lowerCamelCase : str=3 ,lowerCamelCase : Optional[Any]=32 * 8 ,lowerCamelCase : Optional[int]=32 * 8 ,lowerCamelCase : Any=4 ,lowerCamelCase : Any=64 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_auxiliary_loss
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = min_size
__SCREAMING_SNAKE_CASE = max_size
__SCREAMING_SNAKE_CASE = num_labels
__SCREAMING_SNAKE_CASE = hidden_dim
__SCREAMING_SNAKE_CASE = hidden_dim
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=UpperCamelCase_ ) > 0.5
).float()
__SCREAMING_SNAKE_CASE = (torch.rand((self.batch_size, self.num_labels) ,device=UpperCamelCase_ ) > 0.5).long()
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
__SCREAMING_SNAKE_CASE = self.num_queries
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = [1, 1, 1, 1]
__SCREAMING_SNAKE_CASE = self.num_channels
__SCREAMING_SNAKE_CASE = 64
__SCREAMING_SNAKE_CASE = 128
__SCREAMING_SNAKE_CASE = self.hidden_dim
__SCREAMING_SNAKE_CASE = self.hidden_dim
__SCREAMING_SNAKE_CASE = self.hidden_dim
return config
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def UpperCAmelCase__ ( self : str ,lowerCamelCase : List[str] ,lowerCamelCase : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = output.encoder_hidden_states
__SCREAMING_SNAKE_CASE = output.pixel_decoder_hidden_states
__SCREAMING_SNAKE_CASE = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCamelCase_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(UpperCamelCase_ ) ,config.decoder_layers )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ,lowerCamelCase : Union[str, Any]=False ):
'''simple docstring'''
with torch.no_grad():
__SCREAMING_SNAKE_CASE = MaskaFormerModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__SCREAMING_SNAKE_CASE = model(pixel_values=UpperCamelCase_ ,pixel_mask=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ,output_hidden_states=UpperCamelCase_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(UpperCamelCase_ ,UpperCamelCase_ )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Dict ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Tuple ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
def comm_check_on_output(lowerCamelCase : List[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(pixel_values=UpperCamelCase_ ,pixel_mask=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = model(UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = model(
pixel_values=UpperCamelCase_ ,pixel_mask=UpperCamelCase_ ,mask_labels=UpperCamelCase_ ,class_labels=UpperCamelCase_ )
comm_check_on_output(UpperCamelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class __a ( _snake_case, _snake_case, unittest.TestCase ):
__UpperCamelCase : List[Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__UpperCamelCase : Tuple = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
__UpperCamelCase : List[str] = False
__UpperCamelCase : Optional[int] = False
__UpperCamelCase : Tuple = False
__UpperCamelCase : Optional[Any] = False
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskaFormerModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=UpperCamelCase_ ,has_text_modality=UpperCamelCase_ )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCamelCase_ ,**UpperCamelCase_ ,output_hidden_states=UpperCamelCase_ )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*UpperCamelCase_ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,UpperCamelCase_ )
@slow
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__SCREAMING_SNAKE_CASE = MaskaFormerModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (self.model_tester.min_size,) * 2
__SCREAMING_SNAKE_CASE = {
"pixel_values": torch.randn((2, 3, *size) ,device=UpperCamelCase_ ),
"mask_labels": torch.randn((2, 10, *size) ,device=UpperCamelCase_ ),
"class_labels": torch.zeros(2 ,10 ,device=UpperCamelCase_ ).long(),
}
__SCREAMING_SNAKE_CASE = self.model_tester.get_config()
__SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation(UpperCamelCase_ ).to(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(UpperCamelCase_ ,**UpperCamelCase_ ,output_hidden_states=UpperCamelCase_ )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ ).to(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ ,output_attentions=UpperCamelCase_ )
self.assertTrue(outputs.attentions is not None )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
if not self.model_tester.is_training:
return
__SCREAMING_SNAKE_CASE = self.all_model_classes[1]
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.train()
__SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ,mask_labels=UpperCamelCase_ ,class_labels=UpperCamelCase_ ).loss
loss.backward()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.all_model_classes[1]
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(UpperCamelCase_ ).to(UpperCamelCase_ )
model.train()
__SCREAMING_SNAKE_CASE = model(UpperCamelCase_ ,mask_labels=UpperCamelCase_ ,class_labels=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__SCREAMING_SNAKE_CASE = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__SCREAMING_SNAKE_CASE = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__SCREAMING_SNAKE_CASE = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCamelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a = 1E-4
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class __a ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(UpperCamelCase_ ,return_tensors="""pt""" ).to(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ ,(1, 3, 384, 384) )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,UpperCamelCase_ ,atol=UpperCamelCase_ ) )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,UpperCamelCase_ ,atol=UpperCamelCase_ ) )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(UpperCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,UpperCamelCase_ ,atol=UpperCamelCase_ ) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCamelCase_ ).eval()
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(UpperCamelCase_ ,return_tensors="""pt""" ).to(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(UpperCamelCase_ ,(1, 3, 384, 384) )
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ )
# masks_queries_logits
__SCREAMING_SNAKE_CASE = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__SCREAMING_SNAKE_CASE = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
__SCREAMING_SNAKE_CASE = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,UpperCamelCase_ ,atol=UpperCamelCase_ ) )
# class_queries_logits
__SCREAMING_SNAKE_CASE = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
__SCREAMING_SNAKE_CASE = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,UpperCamelCase_ ,atol=UpperCamelCase_ ) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(UpperCamelCase_ ).eval()
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors="""pt""" ,)
__SCREAMING_SNAKE_CASE = inputs["pixel_values"].to(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = [el.to(UpperCamelCase_ ) for el in inputs["mask_labels"]]
__SCREAMING_SNAKE_CASE = [el.to(UpperCamelCase_ ) for el in inputs["class_labels"]]
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**UpperCamelCase_ )
self.assertTrue(outputs.loss is not None )
| 710 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
require_version(deps[pkg] , __UpperCAmelCase )
| 13 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class _lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] ="roc_bert"
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int]=3_05_22 , SCREAMING_SNAKE_CASE__ : List[str]=7_68 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : List[Any]=12 , SCREAMING_SNAKE_CASE__ : Tuple=30_72 , SCREAMING_SNAKE_CASE__ : Any="gelu" , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=5_12 , SCREAMING_SNAKE_CASE__ : str=2 , SCREAMING_SNAKE_CASE__ : Any=0.02 , SCREAMING_SNAKE_CASE__ : Optional[Any]=1e-12 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Dict=0 , SCREAMING_SNAKE_CASE__ : List[Any]="absolute" , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Dict=True , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : str=7_68 , SCREAMING_SNAKE_CASE__ : List[Any]=9_10 , SCREAMING_SNAKE_CASE__ : Tuple=5_12 , SCREAMING_SNAKE_CASE__ : int=2_48_58 , SCREAMING_SNAKE_CASE__ : Optional[int]=True , **SCREAMING_SNAKE_CASE__ : str , ):
"""simple docstring"""
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = type_vocab_size
UpperCamelCase = layer_norm_eps
UpperCamelCase = use_cache
UpperCamelCase = enable_pronunciation
UpperCamelCase = enable_shape
UpperCamelCase = pronunciation_embed_dim
UpperCamelCase = pronunciation_vocab_size
UpperCamelCase = shape_embed_dim
UpperCamelCase = shape_vocab_size
UpperCamelCase = concat_input
UpperCamelCase = position_embedding_type
UpperCamelCase = classifier_dropout
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
| 282 |
from functools import lru_cache
@lru_cache
def __lowerCamelCase ( _lowercase ) -> int:
if num < 0:
raise ValueError('Number should not be negative.' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase_ ( unittest.TestCase ):
@slow
def _snake_case ( self :Union[str, Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
SCREAMING_SNAKE_CASE__ = model(__A )["""last_hidden_state"""]
SCREAMING_SNAKE_CASE__ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __A )
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) ) | 59 |
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowerCamelCase = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser(
description="""Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.""" )
parser.add_argument(
"""--dataset_name""" , type=UpperCamelCase__ , default="""wikitext""" , help="""Name of the training. Explore datasets at: hf.co/datasets.""" , )
parser.add_argument(
"""--dataset_config""" , type=UpperCamelCase__ , default="""wikitext-103-raw-v1""" , help="""Configuration name of the dataset.""" )
parser.add_argument(
"""--tokenizer_name_or_path""" , type=UpperCamelCase__ , default="""sayakpaul/unigram-tokenizer-wikitext""" , help="""Tokenizer identifier. Can be a local filepath or a Hub identifier.""" , )
parser.add_argument(
"""--shard_size""" , type=UpperCamelCase__ , default=1_000 , help="""Number of entries to go in a single shard.""" , )
parser.add_argument("""--split""" , type=UpperCamelCase__ , default="""train""" , choices=["""train""", """test""", """validation"""] )
parser.add_argument(
"""--limit""" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="""Limit the number of shards (used for debugging).""" , )
parser.add_argument(
"""--max_length""" , type=UpperCamelCase__ , default=512 , help="""Maximum sequence length. For training on TPUs, it helps to have a maximum"""
""" sequence length that is a multiple of 8.""" , )
parser.add_argument(
"""--output_dir""" , default="""tf-tpu""" , type=UpperCamelCase__ , help="""Output directory where the TFRecord shards will be saved. If the"""
""" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"""
""" shards will be directly saved to a Google Cloud Storage bucket.""" , )
SCREAMING_SNAKE_CASE__ = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ):
def fn(UpperCamelCase__: Any ):
return tokenizer(examples["""text"""] )
return fn
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Any ):
SCREAMING_SNAKE_CASE__ = []
for i in range(len(tokenized_data["""input_ids"""] ) ):
SCREAMING_SNAKE_CASE__ = {
"""input_ids""": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["""input_ids"""][i] ) ),
"""attention_mask""": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["""attention_mask"""][i] ) ),
}
SCREAMING_SNAKE_CASE__ = tf.train.Features(feature=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = tf.train.Example(features=UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = example.SerializeToString()
records.append(UpperCamelCase__ )
return records
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[str] ):
SCREAMING_SNAKE_CASE__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
SCREAMING_SNAKE_CASE__ = min(len(UpperCamelCase__ ) , args.limit )
SCREAMING_SNAKE_CASE__ = dataset.select(range(UpperCamelCase__ ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
SCREAMING_SNAKE_CASE__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(UpperCamelCase__ ):
os.makedirs(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
SCREAMING_SNAKE_CASE__ = tokenize_function(UpperCamelCase__ )
SCREAMING_SNAKE_CASE__ = dataset.map(UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=4 , remove_columns=["""text"""] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(UpperCamelCase__: int ):
# Concatenate all texts.
SCREAMING_SNAKE_CASE__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
SCREAMING_SNAKE_CASE__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
SCREAMING_SNAKE_CASE__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
SCREAMING_SNAKE_CASE__ = {
k: [t[i : i + args.max_length] for i in range(0 , UpperCamelCase__ , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
SCREAMING_SNAKE_CASE__ = dataset_tokenized.map(UpperCamelCase__ , batched=UpperCamelCase__ , batch_size=1_000 , num_proc=4 )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
for shard in range(0 , len(UpperCamelCase__ ) , args.shard_size ):
SCREAMING_SNAKE_CASE__ = grouped_dataset[shard : shard + args.shard_size]
SCREAMING_SNAKE_CASE__ = len(dataset_snapshot["""input_ids"""] )
SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
SCREAMING_SNAKE_CASE__ = get_serialized_examples(UpperCamelCase__ )
with tf.io.TFRecordWriter(UpperCamelCase__ ) as out_file:
for i in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE__ = serialized_examples[i]
out_file.write(UpperCamelCase__ )
print("""Wrote file {} containing {} records""".format(UpperCamelCase__ , UpperCamelCase__ ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' , """w""" ) as f:
print(f'''Total {args.split} records: {total_records}''' , file=UpperCamelCase__ )
if __name__ == "__main__":
_lowerCamelCase = parse_args()
main(args) | 59 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Whether tp freeze the encoder.'''} )
UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
UpperCamelCase_ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
UpperCamelCase_ = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
UpperCamelCase_ = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
UpperCamelCase_ = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase_ = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
UpperCamelCase_ = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
UpperCamelCase_ = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Source language id for translation.'''} )
UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Target language id for translation.'''} )
UpperCamelCase_ = field(default=__A , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
UpperCamelCase_ = field(
default=__A , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def lowercase_ ( __A : Dict , __A : Optional[Any] , __A : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
logger.info(F'***** {split} metrics *****' )
for key in sorted(metrics.keys() ):
logger.info(F' {key} = {metrics[key]}' )
save_json(__A , os.path.join(__A , F'{split}_results.json' ) )
def lowercase_ ( ) -> int:
"""simple docstring"""
lowercase : Union[str, Any] =HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase , lowercase , lowercase : Optional[Any] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase , lowercase , lowercase : Optional[int] =parser.parse_args_into_dataclasses()
check_output_dir(__A )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''' , __A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase : Union[str, Any] =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase : Dict =('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(__A , __A , __A ):
assert hasattr(__A , __A ), F'({config.__class__.__name__}) doesn\'t have a `{p}` attribute'
setattr(__A , __A , getattr(__A , __A ) )
lowercase : Optional[Any] =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase : Dict =AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='''.ckpt''' in model_args.model_name_or_path , config=__A , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__A , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase : Dict =model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__A , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__A , __A ):
lowercase : int =tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase : Tuple =tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__A )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase : int =SeqaSeqDataset
# Get datasets
lowercase : Optional[Any] =(
dataset_class(
__A , type_path='''train''' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_train
else None
)
lowercase : Optional[int] =(
dataset_class(
__A , type_path='''val''' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase : Any =(
dataset_class(
__A , type_path='''test''' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '''''' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase : Dict =(
build_compute_metrics_fn(data_args.task , __A ) if training_args.predict_with_generate else None
)
lowercase : Optional[int] =SeqaSeqTrainer(
model=__A , args=__A , data_args=__A , train_dataset=__A , eval_dataset=__A , data_collator=SeqaSeqDataCollator(
__A , __A , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__A , tokenizer=__A , )
lowercase : List[str] ={}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
lowercase : Optional[int] =trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase : Any =train_result.metrics
lowercase : Optional[int] =data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''' , __A , training_args.output_dir )
all_metrics.update(__A )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowercase : int =trainer.evaluate(metric_key_prefix='''val''' )
lowercase : str =data_args.n_val
lowercase : Tuple =round(metrics['''val_loss'''] , 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''' , __A , training_args.output_dir )
all_metrics.update(__A )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
lowercase : Union[str, Any] =trainer.predict(test_dataset=__A , metric_key_prefix='''test''' )
lowercase : List[Any] =test_output.metrics
lowercase : Tuple =data_args.n_test
if trainer.is_world_process_zero():
lowercase : Union[str, Any] =round(metrics['''test_loss'''] , 4 )
handle_metrics('''test''' , __A , training_args.output_dir )
all_metrics.update(__A )
if training_args.predict_with_generate:
lowercase : Any =tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__A , clean_up_tokenization_spaces=__A )
lowercase : Tuple =lmap(str.strip , __A )
write_txt_file(__A , os.path.join(training_args.output_dir , '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(__A , os.path.join(training_args.output_dir , '''all_results.json''' ) )
return all_metrics
def lowercase_ ( __A : str ) -> Dict:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 94 |
import tensorflow as tf
from ...tf_utils import shape_list
class a ( tf.keras.layers.Layer ):
def __init__( self :Tuple ,__lowercase :Optional[int] ,__lowercase :List[Any] ,__lowercase :int ,__lowercase :str ,__lowercase :List[str]=1 ,__lowercase :Optional[Any]=False ,**__lowercase :str ):
super().__init__(**__lowercase )
snake_case__ : Union[str, Any] = vocab_size
snake_case__ : Dict = d_embed
snake_case__ : Dict = d_proj
snake_case__ : str = cutoffs + [vocab_size]
snake_case__ : Tuple = [0] + self.cutoffs
snake_case__ : Optional[int] = div_val
snake_case__ : Optional[int] = self.cutoffs[0]
snake_case__ : int = len(self.cutoffs ) - 1
snake_case__ : Any = self.shortlist_size + self.n_clusters
snake_case__ : List[str] = keep_order
snake_case__ : Tuple = []
snake_case__ : str = []
def __lowerCamelCase ( self :str ,__lowercase :Optional[int] ):
if self.n_clusters > 0:
snake_case__ : Tuple = self.add_weight(
shape=(self.n_clusters, self.d_embed) ,initializer='''zeros''' ,trainable=__lowercase ,name='''cluster_weight''' )
snake_case__ : Optional[int] = self.add_weight(
shape=(self.n_clusters,) ,initializer='''zeros''' ,trainable=__lowercase ,name='''cluster_bias''' )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
snake_case__ : int = self.add_weight(
shape=(self.d_embed, self.d_proj) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_projs_._{i}""" ,)
self.out_projs.append(__lowercase )
else:
self.out_projs.append(__lowercase )
snake_case__ : Optional[int] = self.add_weight(
shape=(self.vocab_size, self.d_embed) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_layers_._{i}_._weight""" ,)
snake_case__ : int = self.add_weight(
shape=(self.vocab_size,) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_layers_._{i}_._bias""" ,)
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
snake_case__ , snake_case__ : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
snake_case__ : List[str] = self.d_embed // (self.div_val**i)
snake_case__ : str = self.add_weight(
shape=(d_emb_i, self.d_proj) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_projs_._{i}""" )
self.out_projs.append(__lowercase )
snake_case__ : Optional[int] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_layers_._{i}_._weight""" ,)
snake_case__ : Optional[int] = self.add_weight(
shape=(r_idx - l_idx,) ,initializer='''zeros''' ,trainable=__lowercase ,name=F"""out_layers_._{i}_._bias""" ,)
self.out_layers.append((weight, bias) )
super().build(__lowercase )
@staticmethod
def __lowerCamelCase ( __lowercase :List[str] ,__lowercase :int ,__lowercase :str ,__lowercase :Dict=None ):
snake_case__ : Dict = x
if proj is not None:
snake_case__ : str = tf.einsum('''ibd,ed->ibe''' ,__lowercase ,__lowercase )
return tf.einsum('''ibd,nd->ibn''' ,__lowercase ,__lowercase ) + b
@staticmethod
def __lowerCamelCase ( __lowercase :int ,__lowercase :Any ):
snake_case__ : Union[str, Any] = shape_list(__lowercase )
snake_case__ : int = tf.range(lp_size[0] ,dtype=target.dtype )
snake_case__ : Tuple = tf.stack([r, target] ,1 )
return tf.gather_nd(__lowercase ,__lowercase )
def __lowerCamelCase ( self :str ,__lowercase :Optional[int] ,__lowercase :Tuple ,__lowercase :Union[str, Any]=True ,__lowercase :str=False ):
snake_case__ : Any = 0
if self.n_clusters == 0:
snake_case__ : int = self._logit(__lowercase ,self.out_layers[0][0] ,self.out_layers[0][1] ,self.out_projs[0] )
if target is not None:
snake_case__ : List[Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=__lowercase ,logits=__lowercase )
snake_case__ : Dict = tf.nn.log_softmax(__lowercase ,axis=-1 )
else:
snake_case__ : Any = shape_list(__lowercase )
snake_case__ : Dict = []
snake_case__ : Tuple = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
snake_case__ , snake_case__ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
snake_case__ : List[str] = (target >= l_idx) & (target < r_idx)
snake_case__ : Dict = tf.where(__lowercase )
snake_case__ : Any = tf.boolean_mask(__lowercase ,__lowercase ) - l_idx
if self.div_val == 1:
snake_case__ : List[Any] = self.out_layers[0][0][l_idx:r_idx]
snake_case__ : Dict = self.out_layers[0][1][l_idx:r_idx]
else:
snake_case__ : Optional[int] = self.out_layers[i][0]
snake_case__ : Optional[int] = self.out_layers[i][1]
if i == 0:
snake_case__ : Dict = tf.concat([cur_W, self.cluster_weight] ,0 )
snake_case__ : Union[str, Any] = tf.concat([cur_b, self.cluster_bias] ,0 )
snake_case__ : List[str] = self._logit(__lowercase ,__lowercase ,__lowercase ,self.out_projs[0] )
snake_case__ : Tuple = tf.nn.log_softmax(__lowercase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
snake_case__ : Any = tf.boolean_mask(__lowercase ,__lowercase )
snake_case__ : Union[str, Any] = self._gather_logprob(__lowercase ,__lowercase )
else:
snake_case__ : Tuple = self._logit(__lowercase ,__lowercase ,__lowercase ,self.out_projs[i] )
snake_case__ : Dict = tf.nn.log_softmax(__lowercase )
snake_case__ : Optional[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
snake_case__ : Union[str, Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(__lowercase )
if target is not None:
snake_case__ : Dict = tf.boolean_mask(__lowercase ,__lowercase )
snake_case__ : Tuple = tf.boolean_mask(__lowercase ,__lowercase )
snake_case__ : str = self._gather_logprob(__lowercase ,__lowercase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(__lowercase ,-cur_logprob ,shape_list(__lowercase ) )
snake_case__ : Any = tf.concat(__lowercase ,axis=-1 )
if target is not None:
if return_mean:
snake_case__ : List[str] = tf.reduce_mean(__lowercase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(__lowercase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(__lowercase ,name=self.name ,aggregation='''mean''' if return_mean else '''''' )
return out
| 252 | 0 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __magic_name__ :
lowercase : Tuple =42
lowercase : Any =None
lowercase : List[Any] =None
def lowerCamelCase_(lowerCamelCase_ ) -> List[Any]:
# Validation
def is_valid_tree(lowerCamelCase_ ) -> bool:
if node is None:
return True
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(lowerCAmelCase__ ):
raise ValueError(
"Each node should be type of TreeNode and data should be float." )
def is_binary_search_tree_recursive_check(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , lowerCAmelCase__ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , lowerCAmelCase__ )
)
return is_binary_search_tree_recursive_check(lowerCAmelCase__ , -float("inf" ) , float("inf" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
class __magic_name__ ( A__ ):
lowercase : Tuple =['''pixel_values''']
def __init__( self : Any , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : float = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 2_55 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[str] , ) -> None:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
UpperCAmelCase = size if size is not None else {"shortest_edge": 3_84}
UpperCAmelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCAmelCase = do_resize
UpperCAmelCase = size
# Default value set here for backwards compatibility where the value in config is None
UpperCAmelCase = crop_pct if crop_pct is not None else 2_24 / 2_56
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : float , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ) -> np.ndarray:
'''simple docstring'''
UpperCAmelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
UpperCAmelCase = size["shortest_edge"]
if shortest_edge < 3_84:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCAmelCase = int(shortest_edge / crop_pct )
UpperCAmelCase = get_resize_output_image_size(UpperCamelCase__ , size=UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCAmelCase = resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=UpperCamelCase__ , size=(shortest_edge, shortest_edge) , data_format=UpperCamelCase__ , **UpperCamelCase__ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
UpperCamelCase__ , size=(shortest_edge, shortest_edge) , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> List[str]:
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : float = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Any , ) -> PIL.Image.Image:
'''simple docstring'''
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = crop_pct if crop_pct is not None else self.crop_pct
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
UpperCAmelCase = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 3_84 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
UpperCAmelCase = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , crop_pct=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
UpperCAmelCase = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
UpperCAmelCase = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
UpperCAmelCase = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
UpperCAmelCase = {"pixel_values": images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 457 | 0 |
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
A_ : List[str] =collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
A_ : Any ="""https://storage.googleapis.com/cvdf-datasets/mnist/"""
def SCREAMING_SNAKE_CASE_ ( snake_case : Tuple )-> Optional[int]:
_lowerCamelCase = numpy.dtype(numpy.uintaa ).newbyteorder('>' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=snake_case )[0]
@deprecated(snake_case , 'Please use tf.data to implement this functionality.' )
def SCREAMING_SNAKE_CASE_ ( snake_case : Union[str, Any] )-> Dict:
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=snake_case ) as bytestream:
_lowerCamelCase = _readaa(snake_case )
if magic != 2_051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
_lowerCamelCase = _readaa(snake_case )
_lowerCamelCase = _readaa(snake_case )
_lowerCamelCase = _readaa(snake_case )
_lowerCamelCase = bytestream.read(rows * cols * num_images )
_lowerCamelCase = numpy.frombuffer(snake_case , dtype=numpy.uinta )
_lowerCamelCase = data.reshape(snake_case , snake_case , snake_case , 1 )
return data
@deprecated(snake_case , 'Please use tf.one_hot on tensors.' )
def SCREAMING_SNAKE_CASE_ ( snake_case : List[Any] , snake_case : List[str] )-> Tuple:
_lowerCamelCase = labels_dense.shape[0]
_lowerCamelCase = numpy.arange(snake_case ) * num_classes
_lowerCamelCase = numpy.zeros((num_labels, num_classes) )
_lowerCamelCase = 1
return labels_one_hot
@deprecated(snake_case , 'Please use tf.data to implement this functionality.' )
def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : Optional[int]=False , snake_case : str=10 )-> List[Any]:
print('Extracting' , f.name )
with gzip.GzipFile(fileobj=snake_case ) as bytestream:
_lowerCamelCase = _readaa(snake_case )
if magic != 2_049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
_lowerCamelCase = _readaa(snake_case )
_lowerCamelCase = bytestream.read(snake_case )
_lowerCamelCase = numpy.frombuffer(snake_case , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(snake_case , snake_case )
return labels
class __a :
@deprecated(
a__ , 'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' , )
def __init__( self , a__ , a__ , a__=False , a__=False , a__=dtypes.floataa , a__=True , a__=None , ):
_lowerCamelCase , _lowerCamelCase = random_seed.get_seed(a__ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
_lowerCamelCase = dtypes.as_dtype(a__ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
_lowerCamelCase = 1_00_00
_lowerCamelCase = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'images.shape: {images.shape} labels.shape: {labels.shape}'
_lowerCamelCase = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
_lowerCamelCase = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
_lowerCamelCase = images.astype(numpy.floataa )
_lowerCamelCase = numpy.multiply(a__ , 1.0 / 255.0 )
_lowerCamelCase = images
_lowerCamelCase = labels
_lowerCamelCase = 0
_lowerCamelCase = 0
@property
def snake_case_ ( self ):
return self._images
@property
def snake_case_ ( self ):
return self._labels
@property
def snake_case_ ( self ):
return self._num_examples
@property
def snake_case_ ( self ):
return self._epochs_completed
def snake_case_ ( self , a__ , a__=False , a__=True ):
if fake_data:
_lowerCamelCase = [1] * 7_84
_lowerCamelCase = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(a__ )],
[fake_label for _ in range(a__ )],
)
_lowerCamelCase = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
_lowerCamelCase = numpy.arange(self._num_examples )
numpy.random.shuffle(a__ )
_lowerCamelCase = self.images[perma]
_lowerCamelCase = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
_lowerCamelCase = self._num_examples - start
_lowerCamelCase = self._images[start : self._num_examples]
_lowerCamelCase = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
_lowerCamelCase = numpy.arange(self._num_examples )
numpy.random.shuffle(a__ )
_lowerCamelCase = self.images[perm]
_lowerCamelCase = self.labels[perm]
# Start next epoch
_lowerCamelCase = 0
_lowerCamelCase = batch_size - rest_num_examples
_lowerCamelCase = self._index_in_epoch
_lowerCamelCase = self._images[start:end]
_lowerCamelCase = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
_lowerCamelCase = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(snake_case , 'Please write your own downloading logic.' )
def SCREAMING_SNAKE_CASE_ ( snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : int )-> int:
if not gfile.Exists(snake_case ):
gfile.MakeDirs(snake_case )
_lowerCamelCase = os.path.join(snake_case , snake_case )
if not gfile.Exists(snake_case ):
urllib.request.urlretrieve(snake_case , snake_case ) # noqa: S310
with gfile.GFile(snake_case ) as f:
_lowerCamelCase = f.size()
print('Successfully downloaded' , snake_case , snake_case , 'bytes.' )
return filepath
@deprecated(
snake_case , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[int] , snake_case : Dict=False , snake_case : Tuple=False , snake_case : str=dtypes.floataa , snake_case : str=True , snake_case : Union[str, Any]=5_000 , snake_case : List[Any]=None , snake_case : str=DEFAULT_SOURCE_URL , )-> Tuple:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=snake_case , one_hot=snake_case , dtype=snake_case , seed=snake_case )
_lowerCamelCase = fake()
_lowerCamelCase = fake()
_lowerCamelCase = fake()
return _Datasets(train=snake_case , validation=snake_case , test=snake_case )
if not source_url: # empty string check
_lowerCamelCase = DEFAULT_SOURCE_URL
_lowerCamelCase = 'train-images-idx3-ubyte.gz'
_lowerCamelCase = 'train-labels-idx1-ubyte.gz'
_lowerCamelCase = 't10k-images-idx3-ubyte.gz'
_lowerCamelCase = 't10k-labels-idx1-ubyte.gz'
_lowerCamelCase = _maybe_download(
snake_case , snake_case , source_url + train_images_file )
with gfile.Open(snake_case , 'rb' ) as f:
_lowerCamelCase = _extract_images(snake_case )
_lowerCamelCase = _maybe_download(
snake_case , snake_case , source_url + train_labels_file )
with gfile.Open(snake_case , 'rb' ) as f:
_lowerCamelCase = _extract_labels(snake_case , one_hot=snake_case )
_lowerCamelCase = _maybe_download(
snake_case , snake_case , source_url + test_images_file )
with gfile.Open(snake_case , 'rb' ) as f:
_lowerCamelCase = _extract_images(snake_case )
_lowerCamelCase = _maybe_download(
snake_case , snake_case , source_url + test_labels_file )
with gfile.Open(snake_case , 'rb' ) as f:
_lowerCamelCase = _extract_labels(snake_case , one_hot=snake_case )
if not 0 <= validation_size <= len(snake_case ):
_lowerCamelCase = (
'Validation size should be between 0 and '
f'{len(snake_case )}. Received: {validation_size}.'
)
raise ValueError(snake_case )
_lowerCamelCase = train_images[:validation_size]
_lowerCamelCase = train_labels[:validation_size]
_lowerCamelCase = train_images[validation_size:]
_lowerCamelCase = train_labels[validation_size:]
_lowerCamelCase = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
_lowerCamelCase = _DataSet(snake_case , snake_case , **snake_case )
_lowerCamelCase = _DataSet(snake_case , snake_case , **snake_case )
_lowerCamelCase = _DataSet(snake_case , snake_case , **snake_case )
return _Datasets(train=snake_case , validation=snake_case , test=snake_case )
| 650 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class __a ( lowerCAmelCase__ ):
def __init__( self , a__ , a__=None , a__=True , a__=None , **a__ ):
_lowerCamelCase = parent
_lowerCamelCase = config_class
_lowerCamelCase = has_text_modality
_lowerCamelCase = kwargs
_lowerCamelCase = common_properties
def snake_case_ ( self ):
_lowerCamelCase = self.config_class(**self.inputs_dict )
_lowerCamelCase = (
['hidden_size', 'num_attention_heads', 'num_hidden_layers']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['vocab_size'] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(a__ , a__ ) , msg=F'`{prop}` does not exist' )
# Test that config has the common properties as setter
for idx, name in enumerate(a__ ):
try:
setattr(a__ , a__ , a__ )
self.parent.assertEqual(
getattr(a__ , a__ ) , a__ , msg=F'`{name} value {idx} expected, but was {getattr(a__ , a__ )}' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(a__ ):
try:
_lowerCamelCase = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(a__ , a__ ) , a__ , msg=F'`{name} value {idx} expected, but was {getattr(a__ , a__ )}' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def snake_case_ ( self ):
_lowerCamelCase = self.config_class(**self.inputs_dict )
_lowerCamelCase = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , a__ )
def snake_case_ ( self ):
_lowerCamelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(a__ , 'config.json' )
config_first.to_json_file(a__ )
_lowerCamelCase = self.config_class.from_json_file(a__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def snake_case_ ( self ):
_lowerCamelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(a__ )
_lowerCamelCase = self.config_class.from_pretrained(a__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def snake_case_ ( self ):
_lowerCamelCase = self.config_class(**self.inputs_dict )
_lowerCamelCase = 'test'
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(a__ , a__ )
config_first.save_pretrained(a__ )
_lowerCamelCase = self.config_class.from_pretrained(a__ , subfolder=a__ )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def snake_case_ ( self ):
_lowerCamelCase = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_lowerCamelCase = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def snake_case_ ( self ):
if self.config_class.is_composition:
return
_lowerCamelCase = self.config_class()
self.parent.assertIsNotNone(a__ )
def snake_case_ ( self ):
_lowerCamelCase = copy.deepcopy(a__ )
_lowerCamelCase = self.config_class(**a__ )
_lowerCamelCase = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(('torch_dtype', config.torch_dtype, torch.floataa) )
elif getattr(a__ , a__ ) != value:
wrong_values.append((key, getattr(a__ , a__ ), value) )
if len(a__ ) > 0:
_lowerCamelCase = '\n'.join([F'- {v[0]}: got {v[1]} instead of {v[2]}' for v in wrong_values] )
raise ValueError(F'The following keys were not properly set in the config:\n{errors}' )
def snake_case_ ( self ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 650 | 1 |
'''simple docstring'''
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def lowercase_ ( _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Dict = checkpoints.load_tax_checkpoint(_lowercase )
lowerCamelCase_ : Dict = flatten_dict(_lowercase )
return flax_params
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : List[str] = {}
lowerCamelCase_ : str = {
'''token_embedder''': '''embeddings''',
'''encoder_norm''': '''layernorm''',
'''kernel''': '''weight''',
'''.out''': '''.output''',
'''scale''': '''weight''',
'''embedders_0.pos_embedding''': '''row_embedder.weight''',
'''embedders_1.pos_embedding''': '''column_embedder.weight''',
}
lowerCamelCase_ : Tuple = {
'''query''': '''attention.query''',
'''key''': '''attention.key''',
'''value''': '''attention.value''',
'''output.dense''': '''output''',
'''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''',
'''pre_self_attention_layer_norm''': '''self_attention.layer_norm''',
'''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''',
'''mlp.''': '''mlp.DenseReluDense.''',
'''pre_mlp_layer_norm''': '''mlp.layer_norm''',
'''self_attention.o''': '''self_attention.attention.o''',
'''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''',
'''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''',
'''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.logits_dense.weight''': '''decoder.lm_head.weight''',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
lowerCamelCase_ : Union[str, Any] = '''.'''.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
lowerCamelCase_ : Dict = new_key.replace(_lowercase , _lowercase )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
lowerCamelCase_ : int = new_key.replace(_lowercase , _lowercase )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
lowerCamelCase_ : List[Any] = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , _lowercase )
lowerCamelCase_ : Optional[int] = new_key.replace('''encoder''' , '''encoder.encoder''' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
lowerCamelCase_ : List[str] = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , _lowercase )
lowerCamelCase_ : Optional[int] = flax_dict[key]
lowerCamelCase_ : str = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
lowerCamelCase_ : str = torch.from_numpy(converted_dict[key].T )
else:
lowerCamelCase_ : Tuple = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def lowercase_ ( _lowercase , _lowercase , _lowercase=False , _lowercase=False ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : str = get_flax_param(_lowercase )
if not use_large:
lowerCamelCase_ : List[str] = PixaStructVisionConfig()
lowerCamelCase_ : List[str] = PixaStructTextConfig()
else:
lowerCamelCase_ : Any = PixaStructVisionConfig(
hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 )
lowerCamelCase_ : Any = PixaStructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 )
lowerCamelCase_ : Union[str, Any] = PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_lowercase )
lowerCamelCase_ : Optional[Any] = PixaStructForConditionalGeneration(_lowercase )
lowerCamelCase_ : str = rename_and_convert_flax_params(_lowercase )
model.load_state_dict(_lowercase )
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' )
lowerCamelCase_ : List[str] = PixaStructImageProcessor()
lowerCamelCase_ : int = PixaStructProcessor(image_processor=_lowercase , tokenizer=_lowercase )
if use_large:
lowerCamelCase_ : List[Any] = 4_096
lowerCamelCase_ : Tuple = True
# mkdir if needed
os.makedirs(_lowercase , exist_ok=_lowercase )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
print('''Model saved in {}'''.format(_lowercase ) )
if __name__ == "__main__":
__lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--t5x_checkpoint_path''', default=None, type=str, help='''Path to the original T5x checkpoint.''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--use_large''', action='''store_true''', help='''Use large model.''')
parser.add_argument('''--is_vqa''', action='''store_true''', help='''Use large model.''')
__lowercase : int = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 357 |
'''simple docstring'''
def lowercase_ ( _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : List[Any] = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ : str = 0
while b > 0:
if b & 1:
lowerCamelCase_ : Optional[int] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 357 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase_ = (3, 9, -1_1, 0, 7, 5, 1, -1)
UpperCAmelCase_ = (4, 6, 2, 0, 8, 1_0, 3, -2)
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
lowerCAmelCase_ : int
lowerCAmelCase_ : Node | None
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] , _UpperCAmelCase : Iterable[int] ):
"""simple docstring"""
UpperCAmelCase__ = None
for i in sorted(_UpperCAmelCase , reverse=_UpperCAmelCase ):
UpperCAmelCase__ = Node(_UpperCAmelCase , self.head )
def __iter__( self : str ):
"""simple docstring"""
UpperCAmelCase__ = self.head
while node:
yield node.data
UpperCAmelCase__ = node.next_node
def __len__( self : int ):
"""simple docstring"""
return sum(1 for _ in self )
def __str__( self : Tuple ):
"""simple docstring"""
return " -> ".join([str(_UpperCAmelCase ) for node in self] )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : SortedLinkedList , SCREAMING_SNAKE_CASE__ : SortedLinkedList ):
'''simple docstring'''
return SortedLinkedList(list(SCREAMING_SNAKE_CASE__ ) + list(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 603 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __lt__( self : List[Any] , _UpperCAmelCase : Dict ):
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self : str , _UpperCAmelCase : Tuple ):
"""simple docstring"""
return self[-1] == other[-1]
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : list ):
'''simple docstring'''
UpperCAmelCase__ = []
# sort into stacks
for element in collection:
UpperCAmelCase__ = Stack([element] )
UpperCAmelCase__ = bisect_left(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if i != len(SCREAMING_SNAKE_CASE__ ):
stacks[i].append(SCREAMING_SNAKE_CASE__ )
else:
stacks.append(SCREAMING_SNAKE_CASE__ )
# use a heap-based merge to merge stack efficiently
UpperCAmelCase__ = merge(*(reversed(SCREAMING_SNAKE_CASE__ ) for stack in stacks) )
return collection
if __name__ == "__main__":
UpperCAmelCase_ = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase_ = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 603 | 1 |
from __future__ import annotations
from typing import Any
class UpperCamelCase :
'''simple docstring'''
def __init__( self , UpperCamelCase_ ):
lowercase_ :List[str] = num_of_nodes
lowercase_ :list[list[int]] = []
lowercase_ :dict[int, int] = {}
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
self.m_edges.append([u_node, v_node, weight] )
def UpperCamelCase ( self , UpperCamelCase_ ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def UpperCamelCase ( self , UpperCamelCase_ ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowercase_ :str = self.find_component(UpperCamelCase_ )
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if component_size[u_node] <= component_size[v_node]:
lowercase_ :List[Any] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCamelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowercase_ :List[Any] = self.find_component(UpperCamelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :int = []
lowercase_ :str = 0
lowercase_ :list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowercase_ :List[Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowercase_ , lowercase_ , lowercase_ :int = edge
lowercase_ :Optional[int] = self.m_component[u]
lowercase_ :List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowercase_ :List[str] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ , lowercase_ , lowercase_ :int = edge
lowercase_ :List[Any] = self.m_component[u]
lowercase_ :Dict = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
lowercase_ :str = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def UpperCamelCase ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 441 |
def UpperCamelCase ( _a , _a , _a ) -> int:
'''simple docstring'''
def count_of_possible_combinations(_a ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_a )
def UpperCamelCase ( _a , _a , _a ) -> int:
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_a , _a ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
lowercase_ :Optional[int] = sum(
count_of_possible_combinations_with_dp_array(target - item , _a )
for item in array )
lowercase_ :List[Any] = answer
return answer
lowercase_ :Dict = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_a , _a )
def UpperCamelCase ( _a , _a , _a ) -> int:
'''simple docstring'''
lowercase_ :Optional[int] = [0] * (target + 1)
lowercase_ :int = 1
for i in range(1 , target + 1 ):
for j in range(_a ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE : Optional[Any] = 3
SCREAMING_SNAKE_CASE : Tuple = 5
SCREAMING_SNAKE_CASE : List[Any] = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 441 | 1 |
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
a_ : List[str] = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
a_ : List[Any] = [0, 2_5, 5_0]
a_ : Any = [2_5, 5_0, 7_5]
a_ : str = fuzz.membership.trimf(X, abca)
a_ : int = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
a_ : List[str] = np.ones(7_5)
a_ : List[str] = np.zeros((7_5,))
# 1. Union = max(µA(x), µB(x))
a_ : Optional[Any] = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
a_ : Optional[Any] = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
a_ : int = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
a_ : Tuple = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
a_ : int = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
a_ : Optional[int] = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
a_ : Optional[int] = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
a_ : Union[str, Any] = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show() | 623 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class snake_case_ ( lowerCamelCase_ ):
"""simple docstring"""
A_ = ComputeEnvironment.AMAZON_SAGEMAKER
A_ = True
A_ = '''ml.p3.2xlarge'''
A_ = '''accelerate_sagemaker_execution_role'''
A_ = '''hf-sm'''
A_ = '''us-east-1'''
A_ = 1
A_ = '''accelerate-sagemaker-1'''
A_ = '''1.6'''
A_ = '''4.4'''
A_ = '''train.py'''
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
A_ = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class snake_case_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
UpperCamelCase = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
assert isinstance(converted_args['''model_name_or_path'''] , lowerCamelCase_)
assert isinstance(converted_args['''do_train'''] , lowerCamelCase_)
assert isinstance(converted_args['''epochs'''] , lowerCamelCase_)
assert isinstance(converted_args['''learning_rate'''] , lowerCamelCase_)
assert isinstance(converted_args['''max_steps'''] , lowerCamelCase_)
with pytest.raises(lowerCamelCase_):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args) | 34 | 0 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
A_ : Dict = logging.getLogger(__name__)
A_ : int = "Hello world! cécé herlolip"
A_ : Dict = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
snake_case__ : str = BertAbsConfig(
temp_dir=""".""" , finetune_bert=__magic_name__ , large=__magic_name__ , share_emb=__magic_name__ , use_bert_emb=__magic_name__ , encoder="""bert""" , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , )
snake_case__ : Optional[Any] = torch.load(__magic_name__ , lambda __magic_name__ , __magic_name__ : storage )
snake_case__ : int = AbsSummarizer(__magic_name__ , torch.device("""cpu""" ) , __magic_name__ )
original.eval()
snake_case__ : Optional[int] = BertAbsSummarizer(__magic_name__ , torch.device("""cpu""" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("""convert the model""" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
# Make sure the outpus are identical
# ----------------------------------
logging.info("""Make sure that the models' outputs are identical""" )
snake_case__ : str = BertTokenizer.from_pretrained("""bert-base-uncased""" )
# prepare the model inputs
snake_case__ : Any = tokenizer.encode("""This is sample éàalj'-.""" )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(__magic_name__ )) )
snake_case__ : int = torch.tensor(__magic_name__ ).unsqueeze(0 )
snake_case__ : int = tokenizer.encode("""This is sample 3 éàalj'-.""" )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(__magic_name__ )) )
snake_case__ : Tuple = torch.tensor(__magic_name__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
snake_case__ : Optional[int] = encoder_input_ids
snake_case__ : Tuple = decoder_input_ids
snake_case__ : int = None
snake_case__ : Optional[Any] = None
snake_case__ : Optional[int] = None
snake_case__ : Optional[Any] = None
snake_case__ : str = None
# The original model does not apply the geneator layer immediatly but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
snake_case__ : Union[str, Any] = original(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )[0]
snake_case__ : Dict = original.generator(__magic_name__ )
snake_case__ : Optional[Any] = new_model(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )[0]
snake_case__ : Union[str, Any] = new_model.generator(__magic_name__ )
snake_case__ : int = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__magic_name__ ) )
snake_case__ : Optional[Any] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("""Maximum absolute difference beween weights: {:.2f}""".format(__magic_name__ ) )
snake_case__ : Dict = torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 )
if are_identical:
logging.info("""all weights are equal up to 1e-3""" )
else:
raise ValueError("""the weights are different. The new model is likely different from the original one.""" )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("""saving the model's state dictionary""" )
torch.save(
new_model.state_dict() , """./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin""" )
if __name__ == "__main__":
A_ : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
A_ : Union[str, Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 419 |
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def UpperCamelCase__ ( __magic_name__ : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(__magic_name__ )
snake_case__ : Optional[Any] = FlaxAutoModelForSeqaSeqLM.from_config(config=__magic_name__ )
snake_case__ : str = checkpoints.load_tax_checkpoint(__magic_name__ )
snake_case__ : Optional[int] = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
snake_case__ : Tuple = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
snake_case__ : List[Any] = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case__ : List[str] = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
snake_case__ : Union[str, Any] = f"layers_{str(__magic_name__ )}"
# Self-Attention
snake_case__ : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
snake_case__ : List[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
snake_case__ : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
snake_case__ : Optional[int] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case__ : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
snake_case__ : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
snake_case__ : List[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
snake_case__ : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
snake_case__ : Union[str, Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
snake_case__ : Optional[Any] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
snake_case__ : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
snake_case__ : int = flax_model.params["""encoder"""]["""block"""][str(__magic_name__ )]["""layer"""]
snake_case__ : int = tax_attention_key
snake_case__ : Optional[int] = tax_attention_out
snake_case__ : Union[str, Any] = tax_attention_query
snake_case__ : Tuple = tax_attention_value
snake_case__ : Dict = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case__ : Optional[int] = tax_global_layer_norm
if split_mlp_wi:
snake_case__ : List[str] = tax_mlp_wi_a
snake_case__ : Any = tax_mlp_wi_a
else:
snake_case__ : Union[str, Any] = tax_mlp_wi
snake_case__ : Optional[Any] = tax_mlp_wo
snake_case__ : List[str] = tax_mlp_layer_norm
snake_case__ : List[Any] = flax_model_encoder_layer_block
# Only for layer 0:
snake_case__ : Optional[Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
snake_case__ : str = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case__ : str = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
snake_case__ : Dict = tax_encoder_global_rel_embedding
# Assigning
snake_case__ : List[Any] = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
snake_case__ : Dict = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
snake_case__ : Optional[Any] = f"layers_{str(__magic_name__ )}"
# Self-Attention
snake_case__ : Union[str, Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
snake_case__ : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
snake_case__ : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
snake_case__ : Any = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
snake_case__ : Union[str, Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
snake_case__ : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
snake_case__ : int = tax_enc_dec_attention_module["""key"""]["""kernel"""]
snake_case__ : Any = tax_enc_dec_attention_module["""out"""]["""kernel"""]
snake_case__ : Union[str, Any] = tax_enc_dec_attention_module["""query"""]["""kernel"""]
snake_case__ : int = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
snake_case__ : Dict = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
snake_case__ : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
snake_case__ : int = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
snake_case__ : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
snake_case__ : int = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
snake_case__ : Any = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
snake_case__ : Union[str, Any] = flax_model.params["""decoder"""]["""block"""][str(__magic_name__ )]["""layer"""]
snake_case__ : Dict = tax_attention_key
snake_case__ : int = tax_attention_out
snake_case__ : str = tax_attention_query
snake_case__ : Any = tax_attention_value
snake_case__ : List[Any] = tax_pre_attention_layer_norm
snake_case__ : Any = tax_enc_dec_attention_key
snake_case__ : Any = tax_enc_dec_attention_out
snake_case__ : Any = tax_enc_dec_attention_query
snake_case__ : str = tax_enc_dec_attention_value
snake_case__ : Dict = tax_cross_layer_norm
if split_mlp_wi:
snake_case__ : Tuple = tax_mlp_wi_a
snake_case__ : Dict = tax_mlp_wi_a
else:
snake_case__ : int = tax_mlp_wi
snake_case__ : List[Any] = tax_mlp_wo
snake_case__ : Union[str, Any] = txa_mlp_layer_norm
snake_case__ : int = flax_model_decoder_layer_block
# Decoder Normalization
snake_case__ : str = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
snake_case__ : List[str] = txa_decoder_norm
# Only for layer 0:
snake_case__ : List[str] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
snake_case__ : Any = tax_decoder_rel_embedding
# Token Embeddings
snake_case__ : Optional[Any] = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
snake_case__ : Optional[int] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
snake_case__ : str = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(__magic_name__ )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
A_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
A_ : Dict = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 419 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Any = logging.get_logger(__name__)
def lowercase ( __A : Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case : Dict = """huggingface/label-files"""
snake_case : int = """imagenet-1k-id2label.json"""
snake_case : Tuple = json.load(open(hf_hub_download(__A , __A , repo_type="""dataset""" ) , """r""" ) )
snake_case : Any = {int(__A ): v for k, v in idalabel.items()}
snake_case : Dict = {v: k for k, v in idalabel.items()}
snake_case : Any = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
snake_case : List[Any] = BitConfig(
conv_layer=__A , num_labels=1000 , idalabel=__A , labelaid=__A , )
return config
def lowercase ( __A : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
if "stem.conv" in name:
snake_case : List[str] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
snake_case : List[str] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
snake_case : Optional[int] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
snake_case : Optional[Any] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
snake_case : Tuple = """bit.encoder.""" + name
return name
def lowercase ( ) -> Optional[int]:
'''simple docstring'''
snake_case : int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
snake_case : Optional[Any] = Image.open(requests.get(__A , stream=__A ).raw )
return im
@torch.no_grad()
def lowercase ( __A : Any , __A : Union[str, Any] , __A : str=False ) -> Optional[int]:
'''simple docstring'''
snake_case : str = get_config(__A )
# load original model from timm
snake_case : Tuple = create_model(__A , pretrained=__A )
timm_model.eval()
# load state_dict of original model
snake_case : List[str] = timm_model.state_dict()
for key in state_dict.copy().keys():
snake_case : List[Any] = state_dict.pop(__A )
snake_case : Union[str, Any] = val.squeeze() if """head""" in key else val
# load HuggingFace model
snake_case : List[Any] = BitForImageClassification(__A )
model.eval()
model.load_state_dict(__A )
# create image processor
snake_case : Dict = create_transform(**resolve_data_config({} , model=__A ) )
snake_case : Optional[Any] = transform.transforms
snake_case : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
snake_case : Union[str, Any] = BitImageProcessor(
do_resize=__A , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__A , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__A , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case : Dict = prepare_img()
snake_case : List[str] = transform(__A ).unsqueeze(0 )
snake_case : int = processor(__A , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__A , __A )
# verify logits
with torch.no_grad():
snake_case : Optional[int] = model(__A )
snake_case : Dict = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
snake_case : int = timm_model(__A )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__A , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__A ).mkdir(exist_ok=__A )
print(f"""Saving model {model_name} and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(__A )
processor.save_pretrained(__A )
if push_to_hub:
print(f"""Pushing model {model_name} and processor to the hub""" )
model.push_to_hub(f"""ybelkada/{model_name}""" )
processor.push_to_hub(f"""ybelkada/{model_name}""" )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
__lowercase : Union[str, Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 36 |
"""simple docstring"""
def A__ ( A__ ) -> list[int]:
'''simple docstring'''
if length <= 0 or not isinstance(A__ , A__ ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(A__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 426 | 0 |
'''simple docstring'''
from collections import deque
class __SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) ->None:
'''simple docstring'''
__a = process_name # process name
__a = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
__a = arrival_time
__a = burst_time # remaining burst time
__a = 0 # total time of the process wait in ready queue
__a = 0 # time from arrival time to completion time
class __SCREAMING_SNAKE_CASE :
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) ->None:
'''simple docstring'''
__a = number_of_queues
# time slice of queues that round robin algorithm applied
__a = time_slices
# unfinished process is in this ready_queue
__a = queue
# current time
__a = current_time
# finished process is in this sequence queue
__a = deque()
def __UpperCamelCase ( self ) ->list[str]:
'''simple docstring'''
__a = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __UpperCamelCase ( self , lowerCamelCase ) ->list[int]:
'''simple docstring'''
__a = []
for i in range(len(lowerCamelCase ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __UpperCamelCase ( self , lowerCamelCase ) ->list[int]:
'''simple docstring'''
__a = []
for i in range(len(lowerCamelCase ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __UpperCamelCase ( self , lowerCamelCase ) ->list[int]:
'''simple docstring'''
__a = []
for i in range(len(lowerCamelCase ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __UpperCamelCase ( self , lowerCamelCase ) ->list[int]:
'''simple docstring'''
return [q.burst_time for q in queue]
def __UpperCamelCase ( self , lowerCamelCase ) ->int:
'''simple docstring'''
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __UpperCamelCase ( self , lowerCamelCase ) ->deque[Process]:
'''simple docstring'''
__a = deque() # sequence deque of finished process
while len(lowerCamelCase ) != 0:
__a = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowerCamelCase )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
__a = 0
# set the process's turnaround time because it is finished
__a = self.current_time - cp.arrival_time
# set the completion time
__a = self.current_time
# add the process to queue that has finished queue
finished.append(lowerCamelCase )
self.finish_queue.extend(lowerCamelCase ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __UpperCamelCase ( self , lowerCamelCase , lowerCamelCase ) ->tuple[deque[Process], deque[Process]]:
'''simple docstring'''
__a = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowerCamelCase ) ):
__a = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowerCamelCase )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
__a = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(lowerCamelCase )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
__a = 0
# set the finish time
__a = self.current_time
# update the process' turnaround time because it is finished
__a = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowerCamelCase )
self.finish_queue.extend(lowerCamelCase ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __UpperCamelCase ( self ) ->deque[Process]:
'''simple docstring'''
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
__a , __a = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
__UpperCamelCase : str = Process("""P1""", 0, 53)
__UpperCamelCase : Union[str, Any] = Process("""P2""", 0, 17)
__UpperCamelCase : Union[str, Any] = Process("""P3""", 0, 68)
__UpperCamelCase : Optional[Any] = Process("""P4""", 0, 24)
__UpperCamelCase : int = 3
__UpperCamelCase : Any = [17, 25]
__UpperCamelCase : Tuple = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"""queue""": deque([Pa, Pa, Pa, Pa])})
__UpperCamelCase : Optional[int] = Process("""P1""", 0, 53)
__UpperCamelCase : Union[str, Any] = Process("""P2""", 0, 17)
__UpperCamelCase : Union[str, Any] = Process("""P3""", 0, 68)
__UpperCamelCase : Optional[int] = Process("""P4""", 0, 24)
__UpperCamelCase : Optional[Any] = 3
__UpperCamelCase : Union[str, Any] = [17, 25]
__UpperCamelCase : Optional[Any] = deque([Pa, Pa, Pa, Pa])
__UpperCamelCase : int = MLFQ(number_of_queues, time_slices, queue, 0)
__UpperCamelCase : Optional[Any] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
f"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
f"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
f"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
f"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
) | 718 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: List[Any] ) -> Dict:
"""simple docstring"""
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'] )
__a = np.array(SCREAMING_SNAKE_CASE__ ).astype(np.floataa ) / 2_5_5.0
__a = image[None].transpose(0, 3, 1, 2 )
__a = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
return 2.0 * image - 1.0
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) ->Dict:
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase )
@torch.no_grad()
def __call__( self , lowerCamelCase = None , lowerCamelCase = 1 , lowerCamelCase = 100 , lowerCamelCase = 0.0 , lowerCamelCase = None , lowerCamelCase = "pil" , lowerCamelCase = True , ) ->Union[Tuple, ImagePipelineOutput]:
'''simple docstring'''
if isinstance(lowerCamelCase , PIL.Image.Image ):
__a = 1
elif isinstance(lowerCamelCase , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase )}""" )
if isinstance(lowerCamelCase , PIL.Image.Image ):
__a = preprocess(lowerCamelCase )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(lowerCamelCase , generator=lowerCamelCase , device=self.device , dtype=lowerCamelCase )
__a = image.to(device=self.device , dtype=lowerCamelCase )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCamelCase , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(lowerCamelCase ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
# predict the noise residual
__a = self.unet(lowerCamelCase , lowerCamelCase ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(lowerCamelCase ).sample
__a = torch.clamp(lowerCamelCase , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase ) | 270 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
def __a ( self : Any ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __a ( self : Union[str, Any] ):
'''simple docstring'''
a__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
a__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
a__ = "xvjiarui/stable-diffusion-2-inpainting"
a__ , a__ = FlaxStableDiffusionInpaintPipeline.from_pretrained(lowerCamelCase , safety_checker=lowerCamelCase )
a__ = "Face of a yellow cat, high resolution, sitting on a park bench"
a__ = jax.random.PRNGKey(0 )
a__ = 5_0
a__ = jax.device_count()
a__ = num_samples * [prompt]
a__ = num_samples * [init_image]
a__ = num_samples * [mask_image]
a__ , a__ , a__ = pipeline.prepare_inputs(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# shard inputs and rng
a__ = replicate(lowerCamelCase )
a__ = jax.random.split(lowerCamelCase , jax.device_count() )
a__ = shard(lowerCamelCase )
a__ = shard(lowerCamelCase )
a__ = shard(lowerCamelCase )
a__ = pipeline(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , jit=lowerCamelCase )
a__ = output.images.reshape(lowerCamelCase , 5_1_2 , 5_1_2 , 3 )
a__ = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
a__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
a__ = jnp.array(
[0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084] )
print(F'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 489 |
'''simple docstring'''
def _lowerCamelCase (__lowerCamelCase : list[int] , __lowerCamelCase : list[int] , __lowerCamelCase : int ) -> bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(__lowerCamelCase ) )
def _lowerCamelCase (__lowerCamelCase : list[list[int]] , __lowerCamelCase : int , __lowerCamelCase : list[int] , __lowerCamelCase : int ) -> bool:
# Base Case
if index == len(__lowerCamelCase ):
return True
# Recursive Step
for i in range(__lowerCamelCase ):
if valid_coloring(graph[index] , __lowerCamelCase , __lowerCamelCase ):
# Color current vertex
a__ = i
# Validate coloring
if util_color(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 ):
return True
# Backtrack
a__ = -1
return False
def _lowerCamelCase (__lowerCamelCase : list[list[int]] , __lowerCamelCase : int ) -> list[int]:
a__ = [-1] * len(__lowerCamelCase )
if util_color(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , 0 ):
return colored_vertices
return []
| 489 | 1 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Dict = """The Nymphenburg Palace is a beautiful palace in Munich!"""
def lowercase__ ( __A: Optional[int] ,__A: int ):
'''simple docstring'''
__magic_name__ : Union[str, Any] = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1_0_2_4,
"hidden_size": 7_6_8,
"max_length": 5_1_2,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1_0_2_4,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
__magic_name__ : int = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__magic_name__ : List[Any] = BERTEncoder(
attention_cell=predefined_args['''attention_cell'''] ,num_layers=predefined_args['''num_layers'''] ,units=predefined_args['''units'''] ,hidden_size=predefined_args['''hidden_size'''] ,max_length=predefined_args['''max_length'''] ,num_heads=predefined_args['''num_heads'''] ,scaled=predefined_args['''scaled'''] ,dropout=predefined_args['''dropout'''] ,output_attention=__A ,output_all_encodings=__A ,use_residual=predefined_args['''use_residual'''] ,activation=predefined_args.get('''activation''' ,'''gelu''' ) ,layer_norm_eps=predefined_args.get('''layer_norm_eps''' ,__A ) ,)
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__magic_name__ : List[str] = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
__magic_name__ : Tuple = os.path.join(get_home_dir() ,'''models''' )
__magic_name__ : Tuple = _load_vocab(__A ,__A ,__A ,cls=__A )
__magic_name__ : Any = nlp.model.BERTModel(
__A ,len(__A ) ,units=predefined_args['''units'''] ,embed_size=predefined_args['''embed_size'''] ,embed_dropout=predefined_args['''embed_dropout'''] ,word_embed=predefined_args['''word_embed'''] ,use_pooler=__A ,use_token_type_embed=__A ,token_type_vocab_size=predefined_args['''token_type_vocab_size'''] ,use_classifier=__A ,use_decoder=__A ,)
original_bort.load_parameters(__A ,cast_dtype=__A ,ignore_extra=__A )
__magic_name__ : str = original_bort._collect_params_with_prefix()
# Build our config 🤗
__magic_name__ : Union[str, Any] = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(__A ),
}
__magic_name__ : str = BertConfig.from_dict(__A )
__magic_name__ : List[str] = BertForMaskedLM(__A )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(__A: Optional[Any] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(__A: Any ,__A: Union[str, Any] ):
__magic_name__ : Optional[int] = hf_param.shape
__magic_name__ : int = to_torch(params[gluon_param] )
__magic_name__ : List[str] = gluon_param.shape
assert (
shape_hf == shape_gluon
), F'''The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'''
return gluon_param
__magic_name__ : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight ,'''word_embed.0.weight''' )
__magic_name__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight ,'''encoder.position_weight''' )
__magic_name__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias ,'''encoder.layer_norm.beta''' )
__magic_name__ : Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight ,'''encoder.layer_norm.gamma''' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__magic_name__ : Union[str, Any] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__magic_name__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__magic_name__ : BertSelfAttention = layer.attention.self
__magic_name__ : Any = check_and_map_params(
self_attn.key.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.bias''' )
__magic_name__ : Union[str, Any] = check_and_map_params(
self_attn.key.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_key.weight''' )
__magic_name__ : str = check_and_map_params(
self_attn.query.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.bias''' )
__magic_name__ : Union[str, Any] = check_and_map_params(
self_attn.query.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_query.weight''' )
__magic_name__ : Tuple = check_and_map_params(
self_attn.value.bias.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.bias''' )
__magic_name__ : Optional[int] = check_and_map_params(
self_attn.value.weight.data ,F'''encoder.transformer_cells.{i}.attention_cell.proj_value.weight''' )
# self attention output
__magic_name__ : BertSelfOutput = layer.attention.output
__magic_name__ : int = check_and_map_params(
self_output.dense.bias ,F'''encoder.transformer_cells.{i}.proj.bias''' )
__magic_name__ : Union[str, Any] = check_and_map_params(
self_output.dense.weight ,F'''encoder.transformer_cells.{i}.proj.weight''' )
__magic_name__ : Optional[Any] = check_and_map_params(
self_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.layer_norm.beta''' )
__magic_name__ : Tuple = check_and_map_params(
self_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.layer_norm.gamma''' )
# intermediate
__magic_name__ : BertIntermediate = layer.intermediate
__magic_name__ : Union[str, Any] = check_and_map_params(
intermediate.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.bias''' )
__magic_name__ : List[Any] = check_and_map_params(
intermediate.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_1.weight''' )
# output
__magic_name__ : BertOutput = layer.output
__magic_name__ : List[Any] = check_and_map_params(
bert_output.dense.bias ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.bias''' )
__magic_name__ : List[Any] = check_and_map_params(
bert_output.dense.weight ,F'''encoder.transformer_cells.{i}.ffn.ffn_2.weight''' )
__magic_name__ : Optional[int] = check_and_map_params(
bert_output.LayerNorm.bias ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.beta''' )
__magic_name__ : Union[str, Any] = check_and_map_params(
bert_output.LayerNorm.weight ,F'''encoder.transformer_cells.{i}.ffn.layer_norm.gamma''' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__magic_name__ : Dict = RobertaTokenizer.from_pretrained('''roberta-base''' )
__magic_name__ : Tuple = tokenizer.encode_plus(__A )["input_ids"]
# Get gluon output
__magic_name__ : Optional[Any] = mx.nd.array([input_ids] )
__magic_name__ : Any = original_bort(inputs=__A ,token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(__A )
__magic_name__ : List[Any] = BertModel.from_pretrained(__A )
hf_bort_model.eval()
__magic_name__ : Optional[int] = tokenizer.encode_plus(__A ,return_tensors='''pt''' )
__magic_name__ : Tuple = hf_bort_model(**__A )[0]
__magic_name__ : Tuple = output_gluon[0].asnumpy()
__magic_name__ : Optional[Any] = output_hf[0].detach().numpy()
__magic_name__ : Tuple = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__magic_name__ : Optional[int] = np.allclose(__A ,__A ,atol=1e-3 )
if success:
print('''✔️ Both model do output the same tensors''' )
else:
print('''❌ Both model do **NOT** output the same tensors''' )
print('''Absolute difference is:''' ,__A )
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowerCamelCase : Optional[Any] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 719 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase ( _lowerCamelCase ,unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ =BlenderbotSmallTokenizer
UpperCamelCase__ =False
def UpperCAmelCase__ ( self : Union[str, Any] ) -> str:
super().setUp()
__magic_name__ : Union[str, Any] = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
__magic_name__ : Tuple = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
__magic_name__ : Tuple = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
__magic_name__ : List[Any] = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
__magic_name__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__magic_name__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase_ ) )
def UpperCAmelCase__ ( self : List[str] , **lowerCamelCase_ : Optional[Any] ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase_ )
def UpperCAmelCase__ ( self : Dict , lowerCamelCase_ : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Union[str, Any] = '''adapt act apte'''
__magic_name__ : Dict = '''adapt act apte'''
return input_text, output_text
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
__magic_name__ : int = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__magic_name__ : str = '''adapt act apte'''
__magic_name__ : Any = ['''adapt''', '''act''', '''ap@@''', '''te''']
__magic_name__ : List[str] = tokenizer.tokenize(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
__magic_name__ : Union[str, Any] = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__magic_name__ : List[Any] = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , lowerCamelCase_ )
def UpperCAmelCase__ ( self : int ) -> int:
__magic_name__ : Any = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [1384]
__magic_name__ : Dict = '''I am a small frog.'''
__magic_name__ : Tuple = tok([src_text] , padding=lowerCamelCase_ , truncation=lowerCamelCase_ )['''input_ids''']
__magic_name__ : Tuple = tok.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCAmelCase__ ( self : Tuple ) -> Dict:
__magic_name__ : Tuple = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
__magic_name__ : Any = '''I am a small frog .'''
__magic_name__ : List[str] = '''.'''
__magic_name__ : Tuple = tok(lowerCamelCase_ )['''input_ids''']
__magic_name__ : Optional[Any] = tok(lowerCamelCase_ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 501 | 0 |
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : float = 0 ) -> None:
a_ , a_ : List[Any] = row, column
a_ : List[Any] = [[default_value for c in range(SCREAMING_SNAKE_CASE__ )] for r in range(SCREAMING_SNAKE_CASE__ )]
def __str__( self : List[Any] ) -> str:
a_ : List[str] = F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
a_ : List[Any] = 0
for row_vector in self.array:
for obj in row_vector:
a_ : str = max(SCREAMING_SNAKE_CASE__ , len(str(SCREAMING_SNAKE_CASE__ ) ) )
a_ : Optional[int] = F"""%{max_element_length}s"""
# Make string and return
def single_line(SCREAMING_SNAKE_CASE__ : list[float] ) -> str:
nonlocal string_format_identifier
a_ : List[str] = '['
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(SCREAMING_SNAKE_CASE__ ) for row_vector in self.array )
return s
def __repr__( self : List[Any] ) -> str:
return str(self )
def SCREAMING_SNAKE_CASE ( self : List[str] , SCREAMING_SNAKE_CASE__ : tuple[int, int] ) -> bool:
if not (isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) and len(SCREAMING_SNAKE_CASE__ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : tuple[int, int] ) -> Any:
assert self.validate_indicies(SCREAMING_SNAKE_CASE__ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : tuple[int, int] , SCREAMING_SNAKE_CASE__ : float ) -> None:
assert self.validate_indicies(SCREAMING_SNAKE_CASE__ )
a_ : List[str] = value
def __add__( self : List[str] , SCREAMING_SNAKE_CASE__ : Matrix ) -> Matrix:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert self.row == another.row and self.column == another.column
# Add
a_ : List[Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a_ : Any = self[r, c] + another[r, c]
return result
def __neg__( self : Optional[int] ) -> Matrix:
a_ : Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a_ : Tuple = -self[r, c]
return result
def __sub__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Matrix ) -> Matrix:
return self + (-another)
def __mul__( self : int , SCREAMING_SNAKE_CASE__ : int | float | Matrix ) -> Matrix:
if isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ): # Scalar multiplication
a_ : int = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
a_ : List[Any] = self[r, c] * another
return result
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): # Matrix multiplication
assert self.column == another.row
a_ : List[Any] = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
a_ : Any = F"""Unsupported type given for another ({type(SCREAMING_SNAKE_CASE__ )})"""
raise TypeError(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : str ) -> Matrix:
a_ : List[Any] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
a_ : Any = self[r, c]
return result
def SCREAMING_SNAKE_CASE ( self : str , SCREAMING_SNAKE_CASE__ : Matrix , SCREAMING_SNAKE_CASE__ : Matrix ) -> Any:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
a_ : List[Any] = v.transpose()
a_ : Union[str, Any] = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE_ ( ) -> None:
"""simple docstring"""
a_ : Any = Matrix(3 , 3 , 0 )
for i in range(3 ):
a_ : int = 1
print(F"""a^(-1) is {ainv}""" )
# u, v
a_ : Dict = Matrix(3 , 1 , 0 )
a_ , a_ , a_ : Tuple = 1, 2, -3
a_ : List[Any] = Matrix(3 , 1 , 0 )
a_ , a_ , a_ : Union[str, Any] = 4, -2, 5
print(F"""u is {u}""" )
print(F"""v is {v}""" )
print(F"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(F"""(a + uv^T)^(-1) is {ainv.sherman_morrison(__A , __A )}""" )
def SCREAMING_SNAKE_CASE_ ( ) -> None:
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 570 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ):
snake_case__ : List[str] = ['''onnx''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Any:
requires_backends(self , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
requires_backends(cls , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['onnx'] )
| 570 | 1 |
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase ( unittest.TestCase ):
a : Union[str, Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowercase ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
_SCREAMING_SNAKE_CASE = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
_SCREAMING_SNAKE_CASE = VideoClassificationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase , top_k=2 )
_SCREAMING_SNAKE_CASE = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def lowercase ( self , UpperCamelCase , UpperCamelCase ):
for example in examples:
_SCREAMING_SNAKE_CASE = video_classifier(UpperCamelCase )
self.assertEqual(
UpperCamelCase , [
{"score": ANY(UpperCamelCase ), "label": ANY(UpperCamelCase )},
{"score": ANY(UpperCamelCase ), "label": ANY(UpperCamelCase )},
] , )
@require_torch
def lowercase ( self ):
_SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
_SCREAMING_SNAKE_CASE = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
_SCREAMING_SNAKE_CASE = pipeline(
"video-classification" , model=UpperCamelCase , feature_extractor=UpperCamelCase , frame_sampling_rate=4 )
_SCREAMING_SNAKE_CASE = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
_SCREAMING_SNAKE_CASE = video_classifier(UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}] , )
_SCREAMING_SNAKE_CASE = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
[{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}],
[{"score": 0.51_99, "label": "LABEL_0"}, {"score": 0.48_01, "label": "LABEL_1"}],
] , )
@require_tf
def lowercase ( self ):
pass | 493 |
'''simple docstring'''
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_snake_case : Optional[int] = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
_snake_case : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
def _a ( ):
_SCREAMING_SNAKE_CASE = "https://pypi.org/pypi/diffusers/json"
_SCREAMING_SNAKE_CASE = json.loads(request.urlopen(_SCREAMING_SNAKE_CASE ).read() )["releases"].keys()
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : version.Version(_SCREAMING_SNAKE_CASE ) )
def _a ( ):
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / "__init__.py"
if not init_path.exists():
init_path.touch()
def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] ):
init_hf_modules()
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = dynamic_module_path / "__init__.py"
if not init_path.exists():
init_path.touch()
def _a ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
# Imports of the form `import .xxx`
_SCREAMING_SNAKE_CASE = re.findall("^\s*import\s+\.(\S+)\s*$" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall("^\s*from\s+\.(\S+)\s+import" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Unique-ify
return list(set(_SCREAMING_SNAKE_CASE ) )
def _a ( _SCREAMING_SNAKE_CASE : List[str] ):
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = [module_file]
_SCREAMING_SNAKE_CASE = []
# Let's recurse through all relative imports
while not no_change:
_SCREAMING_SNAKE_CASE = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_SCREAMING_SNAKE_CASE ) )
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ).parent
_SCREAMING_SNAKE_CASE = [str(module_path / m ) for m in new_imports]
_SCREAMING_SNAKE_CASE = [f for f in new_import_files if f not in all_relative_imports]
_SCREAMING_SNAKE_CASE = [F'{f}.py' for f in new_import_files]
_SCREAMING_SNAKE_CASE = len(_SCREAMING_SNAKE_CASE ) == 0
all_relative_imports.extend(_SCREAMING_SNAKE_CASE )
return all_relative_imports
def _a ( _SCREAMING_SNAKE_CASE : str ):
with open(_SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as f:
_SCREAMING_SNAKE_CASE = f.read()
# Imports of the form `import xxx`
_SCREAMING_SNAKE_CASE = re.findall("^\s*import\s+(\S+)\s*$" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall("^\s*from\s+(\S+)\s+import" , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Only keep the top-level module
_SCREAMING_SNAKE_CASE = [imp.split("." )[0] for imp in imports if not imp.startswith("." )]
# Unique-ify and test we got them all
_SCREAMING_SNAKE_CASE = list(set(_SCREAMING_SNAKE_CASE ) )
_SCREAMING_SNAKE_CASE = []
for imp in imports:
try:
importlib.import_module(_SCREAMING_SNAKE_CASE )
except ImportError:
missing_packages.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ImportError(
"This modeling file requires the following packages that were not found in your environment: "
F'{", ".join(_SCREAMING_SNAKE_CASE )}. Run `pip install {" ".join(_SCREAMING_SNAKE_CASE )}`' )
return get_relative_imports(_SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : int ):
_SCREAMING_SNAKE_CASE = module_path.replace(os.path.sep , "." )
_SCREAMING_SNAKE_CASE = importlib.import_module(_SCREAMING_SNAKE_CASE )
if class_name is None:
return find_pipeline_class(_SCREAMING_SNAKE_CASE )
return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE : List[Any] ):
from ..pipelines import DiffusionPipeline
_SCREAMING_SNAKE_CASE = dict(inspect.getmembers(_SCREAMING_SNAKE_CASE , inspect.isclass ) )
_SCREAMING_SNAKE_CASE = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _SCREAMING_SNAKE_CASE )
and cls.__module__.split("." )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
_SCREAMING_SNAKE_CASE = cls
return pipeline_class
def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : bool = False , ):
_SCREAMING_SNAKE_CASE = str(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE = module_file_or_url
_SCREAMING_SNAKE_CASE = "local"
elif pretrained_model_name_or_path.count("/" ) == 0:
_SCREAMING_SNAKE_CASE = get_diffusers_versions()
# cut ".dev0"
_SCREAMING_SNAKE_CASE = "v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
_SCREAMING_SNAKE_CASE = latest_version if latest_version[1:] in available_versions else "main"
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
_SCREAMING_SNAKE_CASE = F'v{revision}'
elif revision == "main":
_SCREAMING_SNAKE_CASE = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
_SCREAMING_SNAKE_CASE = COMMUNITY_PIPELINES_URL.format(revision=_SCREAMING_SNAKE_CASE , pipeline=_SCREAMING_SNAKE_CASE )
try:
_SCREAMING_SNAKE_CASE = cached_download(
_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , )
_SCREAMING_SNAKE_CASE = "git"
_SCREAMING_SNAKE_CASE = pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
_SCREAMING_SNAKE_CASE = hf_hub_download(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , )
_SCREAMING_SNAKE_CASE = os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
_SCREAMING_SNAKE_CASE = check_imports(_SCREAMING_SNAKE_CASE )
# Now we move the module inside our cached dynamic modules.
_SCREAMING_SNAKE_CASE = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = Path(_SCREAMING_SNAKE_CASE ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_SCREAMING_SNAKE_CASE , submodule_path / module_file )
for module_needed in modules_needed:
_SCREAMING_SNAKE_CASE = F'{module_needed}.py'
shutil.copy(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE = use_auth_token
elif use_auth_token is True:
_SCREAMING_SNAKE_CASE = HfFolder.get_token()
else:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = model_info(_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
_SCREAMING_SNAKE_CASE = submodule_path / commit_hash
_SCREAMING_SNAKE_CASE = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_SCREAMING_SNAKE_CASE )
if not (submodule_path / module_file).exists():
shutil.copy(_SCREAMING_SNAKE_CASE , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_SCREAMING_SNAKE_CASE , F'{module_needed}.py' , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
return os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _a ( _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : Optional[Union[str, os.PathLike]] = None , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : bool = False , _SCREAMING_SNAKE_CASE : Optional[Dict[str, str]] = None , _SCREAMING_SNAKE_CASE : Optional[Union[bool, str]] = None , _SCREAMING_SNAKE_CASE : Optional[str] = None , _SCREAMING_SNAKE_CASE : bool = False , **_SCREAMING_SNAKE_CASE : Tuple , ):
_SCREAMING_SNAKE_CASE = get_cached_module_file(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
return get_class_in_module(_SCREAMING_SNAKE_CASE , final_module.replace(".py" , "" ) ) | 493 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE : int = {
"""configuration_upernet""": ["""UperNetConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Tuple = [
"""UperNetForSemanticSegmentation""",
"""UperNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 257 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def _A ( SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
a__ : Optional[Any] =SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
a__ : Optional[int] =4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
a__ : int =4
a__ : Optional[int] =48
a__ : str ="pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
a__ : str =[6, 6, 6, 6]
a__ : Optional[int] =60
a__ : Any =[6, 6, 6, 6]
a__ : int ="pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
a__ : List[str] =4
a__ : Union[str, Any] ="nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
a__ : str =1
a__ : Optional[Any] =1
a__ : str =126
a__ : Optional[Any] =7
a__ : Optional[int] =2_5_5.0
a__ : str =""
return config
def _A ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
a__ : Any =name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a__ : str =name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" )
if "layers" in name:
a__ : Union[str, Any] =name.replace("layers" , "encoder.stages" )
if "residual_group.blocks" in name:
a__ : List[Any] =name.replace("residual_group.blocks" , "layers" )
if "attn.proj" in name:
a__ : Union[str, Any] =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
a__ : int =name.replace("attn" , "attention.self" )
if "norm1" in name:
a__ : List[Any] =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a__ : Optional[int] =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a__ : Dict =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a__ : Optional[int] =name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
a__ : List[Any] =name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
a__ : Optional[int] =name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
a__ : Optional[Any] =name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
a__ : List[str] =name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if "patch_embed.proj" in name:
a__ : List[Any] =name.replace("patch_embed.proj" , "patch_embed.projection" )
if name == "norm.weight":
a__ : Dict ="layernorm.weight"
if name == "norm.bias":
a__ : Any ="layernorm.bias"
if "conv_first" in name:
a__ : Tuple =name.replace("conv_first" , "first_convolution" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
a__ : List[str] =name.replace("conv_last" , "final_convolution" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
a__ : str =name.replace("conv_before_upsample.0" , "conv_before_upsample" )
if "upsample.0" in name:
a__ : Any =name.replace("upsample.0" , "upsample.convolution_0" )
if "upsample.2" in name:
a__ : Optional[int] =name.replace("upsample.2" , "upsample.convolution_1" )
a__ : Any ="upsample." + name
elif config.upsampler == "pixelshuffledirect":
a__ : str =name.replace("upsample.0.weight" , "upsample.conv.weight" )
a__ : Any =name.replace("upsample.0.bias" , "upsample.conv.bias" )
else:
pass
else:
a__ : Dict ="swin2sr." + name
return name
def _A ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a__ : Dict =orig_state_dict.pop(SCREAMING_SNAKE_CASE )
if "qkv" in key:
a__ : str =key.split("." )
a__ : Optional[int] =int(key_split[1] )
a__ : Dict =int(key_split[4] )
a__ : List[Any] =config.embed_dim
if "weight" in key:
a__ : List[Any] =val[:dim, :]
a__ : List[str] =val[dim : dim * 2, :]
a__ : Dict =val[-dim:, :]
else:
a__ : int =val[:dim]
a__ : Union[str, Any] =val[dim : dim * 2]
a__ : Tuple =val[-dim:]
pass
else:
a__ : Union[str, Any] =val
return orig_state_dict
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
a__ : Optional[Any] =get_config(SCREAMING_SNAKE_CASE )
a__ : Union[str, Any] =SwinaSRForImageSuperResolution(SCREAMING_SNAKE_CASE )
model.eval()
a__ : Union[str, Any] =torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location="cpu" )
a__ : Dict =convert_state_dict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
a__ , a__ : List[Any] =model.load_state_dict(SCREAMING_SNAKE_CASE , strict=SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError("Missing keys when converting: {}".format(SCREAMING_SNAKE_CASE ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'''Unexpected key {key} in state_dict''' )
# verify values
a__ : str ="https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
a__ : List[Any] =Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw ).convert("RGB" )
a__ : Dict =SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
a__ : List[str] =126 if "Jpeg" in checkpoint_url else 256
a__ : Optional[Any] =Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
a__ : Dict =transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
if config.num_channels == 1:
a__ : Tuple =pixel_values[:, 0, :, :].unsqueeze(1 )
a__ : Union[str, Any] =model(SCREAMING_SNAKE_CASE )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
a__ : str =torch.Size([1, 3, 512, 512] )
a__ : List[str] =torch.tensor(
[[-0.7_0_8_7, -0.7_1_3_8, -0.6_7_2_1], [-0.8_3_4_0, -0.8_0_9_5, -0.7_2_9_8], [-0.9_1_4_9, -0.8_4_1_4, -0.7_9_4_0]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
a__ : List[Any] =torch.Size([1, 3, 1_024, 1_024] )
a__ : List[str] =torch.tensor(
[[-0.7_7_7_5, -0.8_1_0_5, -0.8_9_3_3], [-0.7_7_6_4, -0.8_3_5_6, -0.9_2_2_5], [-0.7_9_7_6, -0.8_6_8_6, -0.9_5_7_9]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
a__ : Tuple =torch.Size([1, 3, 1_024, 1_024] )
a__ : Optional[int] =torch.tensor(
[[-0.8_0_3_5, -0.7_5_0_4, -0.7_4_9_1], [-0.8_5_3_8, -0.8_1_2_4, -0.7_7_8_2], [-0.8_8_0_4, -0.8_6_5_1, -0.8_4_9_3]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
a__ : Tuple =torch.Size([1, 3, 512, 512] )
a__ : str =torch.tensor(
[[-0.7_6_6_9, -0.8_6_6_2, -0.8_7_6_7], [-0.8_8_1_0, -0.9_9_6_2, -0.9_8_2_0], [-0.9_3_4_0, -1.0_3_2_2, -1.1_1_4_9]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
a__ : Optional[int] =torch.Size([1, 3, 1_024, 1_024] )
a__ : Optional[Any] =torch.tensor(
[[-0.5_2_3_8, -0.5_5_5_7, -0.6_3_2_1], [-0.6_0_1_6, -0.5_9_0_3, -0.6_3_9_1], [-0.6_2_4_4, -0.6_3_3_4, -0.6_8_8_9]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-3 )
print("Looks ok!" )
a__ : int ={
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
a__ : Any =url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
model.push_to_hub(f'''caidas/{model_name}''' )
processor.push_to_hub(f'''caidas/{model_name}''' )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 563 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
lowerCAmelCase__ : List[Any] = np.zeros((n + 1,) )
lowerCAmelCase__ : Any = ya
lowerCAmelCase__ : List[Any] = xa
for k in range(UpperCamelCase ):
lowerCAmelCase__ : int = y[k] + step_size * ode_func(UpperCamelCase , y[k] )
lowerCAmelCase__ : Optional[Any] = y[k] + (
(step_size / 2) * (ode_func(UpperCamelCase , y[k] ) + ode_func(x + step_size , UpperCamelCase ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
| 160 |
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
while a != 0:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = b % a, a
return b
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
if gcd(UpperCamelCase , UpperCamelCase ) != 1:
lowerCAmelCase__ : List[Any] = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(UpperCamelCase )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = 1, 0, a
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = 0, 1, m
while va != 0:
lowerCAmelCase__ : Optional[int] = ua // va
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 160 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCAmelCase : List[str] =get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( __a , unittest.TestCase ):
__lowercase = DebertaVaTokenizer
__lowercase = DebertaVaTokenizerFast
__lowercase = True
__lowercase = True
def UpperCAmelCase_ ( self :str )-> str:
super().setUp()
# We have a SentencePiece fixture for testing
A__ = DebertaVaTokenizer(a__ , unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ ( self :str , lowercase_ :int )-> List[str]:
A__ = "this is a test"
A__ = "this is a test"
return input_text, output_text
def UpperCAmelCase_ ( self :Tuple )-> List[Any]:
A__ = "<pad>"
A__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def UpperCAmelCase_ ( self :str )-> Optional[int]:
A__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "[PAD]" )
self.assertEqual(len(a__ ) , 3_00_01 )
def UpperCAmelCase_ ( self :Tuple )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def UpperCAmelCase_ ( self :List[Any] )-> str:
# fmt: off
A__ = " \tHeLLo!how \n Are yoU? "
A__ = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ )
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ )
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCAmelCase_ ( self :Optional[int] )-> Optional[Any]:
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCAmelCase_ ( self :List[Any] )-> int:
pass
def UpperCAmelCase_ ( self :Dict )-> List[str]:
# fmt: off
A__ = "I was born in 92000, and this is falsé."
A__ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
A__ = DebertaVaTokenizer(a__ , split_by_punct=a__ )
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
A__ = DebertaVaTokenizerFast(a__ , split_by_punct=a__ )
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def UpperCAmelCase_ ( self :List[Any] )-> List[Any]:
# fmt: off
A__ = "I was born in 92000, and this is falsé."
A__ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def UpperCAmelCase_ ( self :Any )-> Optional[int]:
# fmt: off
A__ = "I was born in 92000, and this is falsé."
A__ = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def UpperCAmelCase_ ( self :Optional[Any] )-> str:
# fmt: off
A__ = "I was born in 92000, and this is falsé."
A__ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def UpperCAmelCase_ ( self :Union[str, Any] )-> Dict:
# fmt: off
A__ = " \tHeLLo!how \n Are yoU? "
A__ = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
A__ = DebertaVaTokenizer(a__ , do_lower_case=a__ , split_by_punct=a__ )
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
A__ = DebertaVaTokenizerFast(a__ , do_lower_case=a__ , split_by_punct=a__ )
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
def UpperCAmelCase_ ( self :str )-> Optional[Any]:
A__ = self.get_tokenizer()
A__ = self.get_rust_tokenizer()
A__ = "I was born in 92000, and this is falsé."
A__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(a__ , add_special_tokens=a__ ) )
A__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(a__ , add_special_tokens=a__ ) )
self.assertListEqual(a__ , a__ )
A__ = tokenizer.encode(a__ , add_special_tokens=a__ )
A__ = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
A__ = self.get_rust_tokenizer()
A__ = tokenizer.encode(a__ )
A__ = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
def UpperCAmelCase_ ( self :Tuple )-> Optional[Any]:
A__ = "This is a test"
A__ = [13, 1, 43_98, 25, 21, 12_89]
A__ = ["▁", "T", "his", "▁is", "▁a", "▁test"]
A__ = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
A__ = DebertaVaTokenizer(a__ , keep_accents=a__ )
A__ = DebertaVaTokenizerFast(a__ , keep_accents=a__ )
A__ = tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
A__ = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
A__ = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
A__ = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
A__ = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
A__ = rust_tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
# fmt: off
A__ = "I was born in 92000, and this is falsé."
A__ = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
A__ = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
A__ = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
A__ = tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
A__ = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
A__ = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
A__ = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
A__ = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
A__ = rust_tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(a__ , a__ )
def UpperCAmelCase_ ( self :List[Any] )-> Tuple:
A__ = DebertaVaTokenizer(a__ )
A__ = tokenizer.encode("sequence builders" )
A__ = tokenizer.encode("multi-sequence build" )
A__ = tokenizer.build_inputs_with_special_tokens(a__ )
A__ = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , a__ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , a__ , )
@slow
def UpperCAmelCase_ ( self :Tuple )-> str:
# fmt: off
A__ = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="microsoft/deberta-v2-xlarge" , revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" , )
| 440 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _SCREAMING_SNAKE_CASE ( __a ):
__SCREAMING_SNAKE_CASE :Optional[int] = """ClapFeatureExtractor"""
__SCREAMING_SNAKE_CASE :List[Any] = ("""RobertaTokenizer""", """RobertaTokenizerFast""")
def __init__( self : Optional[Any] , a__ : Dict , a__ : Dict ):
super().__init__(a__ , a__ )
def __call__( self : Dict , a__ : List[str]=None , a__ : List[Any]=None , a__ : Any=None , **a__ : Tuple ):
__magic_name__ = kwargs.pop('''sampling_rate''' , a__ )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
__magic_name__ = self.tokenizer(a__ , return_tensors=a__ , **a__ )
if audios is not None:
__magic_name__ = self.feature_extractor(
a__ , sampling_rate=a__ , return_tensors=a__ , **a__ )
if text is not None and audios is not None:
__magic_name__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a__ ) , tensor_type=a__ )
def snake_case__ ( self : List[Any] , *a__ : str , **a__ : List[str] ):
return self.tokenizer.batch_decode(*a__ , **a__ )
def snake_case__ ( self : int , *a__ : Tuple , **a__ : Tuple ):
return self.tokenizer.decode(*a__ , **a__ )
@property
def snake_case__ ( self : Any ):
__magic_name__ = self.tokenizer.model_input_names
__magic_name__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 432 | 0 |
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __A :
"""simple docstring"""
UpperCamelCase__ : str =None
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[Any] =self.feature_extraction_class(**self.feat_extract_dict )
__UpperCamelCase : List[str] =json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , lowerCamelCase__ )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : int =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : Optional[int] =os.path.join(lowerCamelCase__ , 'feat_extract.json' )
feat_extract_first.to_json_file(lowerCamelCase__ )
__UpperCamelCase : List[str] =self.feature_extraction_class.from_json_file(lowerCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : List[str] =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase : Union[str, Any] =feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
__UpperCamelCase : Optional[int] =self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def __lowercase ( self ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] =self.feature_extraction_class()
self.assertIsNotNone(lowerCamelCase__ )
| 721 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def A ( a_ = "" ) -> dict[str, float]:
__UpperCamelCase : Tuple =url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
__UpperCamelCase : Optional[int] =BeautifulSoup(requests.get(a_ ).text ,'html.parser' )
__UpperCamelCase : Union[str, Any] =soup.find_all('td' ,attrs='titleColumn' )
__UpperCamelCase : Any =soup.find_all('td' ,class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(a_ ,a_ )
}
def A ( a_ = "IMDb_Top_250_Movies.csv" ) -> None:
__UpperCamelCase : Dict =get_imdb_top_aaa_movies()
with open(a_ ,'w' ,newline='' ) as out_file:
__UpperCamelCase : Any =csv.writer(a_ )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 154 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class _A :
def __init__( self : Tuple , _A : int , _A : int=13 , _A : Any=30 , _A : int=2 , _A : str=3 , _A : Tuple=True , _A : Union[str, Any]=True , _A : int=32 , _A : Union[str, Any]=5 , _A : Union[str, Any]=4 , _A : List[Any]=37 , _A : Tuple="gelu" , _A : Union[str, Any]=0.1 , _A : Any=0.1 , _A : Optional[Any]=10 , _A : List[str]=0.02 , _A : Dict=3 , _A : Tuple=None , _A : str=2 , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Dict = parent
lowercase : Optional[int] = batch_size
lowercase : Any = image_size
lowercase : Any = patch_size
lowercase : List[str] = num_channels
lowercase : Optional[Any] = is_training
lowercase : List[Any] = use_labels
lowercase : Optional[Any] = hidden_size
lowercase : int = num_hidden_layers
lowercase : Union[str, Any] = num_attention_heads
lowercase : Union[str, Any] = intermediate_size
lowercase : Union[str, Any] = hidden_act
lowercase : Optional[Any] = hidden_dropout_prob
lowercase : List[Any] = attention_probs_dropout_prob
lowercase : Tuple = type_sequence_label_size
lowercase : str = initializer_range
lowercase : Union[str, Any] = scope
lowercase : int = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
lowercase : List[str] = (image_size // patch_size) ** 2
lowercase : Union[str, Any] = num_patches + 2
def __a ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
lowercase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : List[Any] = None
if self.use_labels:
lowercase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : str = self.get_config()
return config, pixel_values, labels
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __a ( self : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : List[Any] ) -> Dict:
"""simple docstring"""
lowercase : Any = DeiTModel(config=_A )
model.to(_A )
model.eval()
lowercase : Union[str, Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Union[str, Any] , _A : Any , _A : Optional[int] , _A : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Optional[int] = DeiTForMaskedImageModeling(config=_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase : List[Any] = 1
lowercase : Union[str, Any] = DeiTForMaskedImageModeling(_A )
model.to(_A )
model.eval()
lowercase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase : Optional[Any] = model(_A )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __a ( self : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : List[str] ) -> Any:
"""simple docstring"""
lowercase : Union[str, Any] = self.type_sequence_label_size
lowercase : List[Any] = DeiTForImageClassification(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase : Any = 1
lowercase : Optional[Any] = DeiTForImageClassification(_A )
model.to(_A )
model.eval()
lowercase : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase : Optional[int] = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase : str = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = config_and_inputs
lowercase : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : int = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': DeiTModel,
'''image-classification''': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Any = False
_UpperCamelCase : Any = False
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = DeiTModelTester(self )
lowercase : Optional[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def __a ( self : List[Any] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def __a ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def __a ( self : int ) -> str:
"""simple docstring"""
lowercase , lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : int = model_class(_A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_A , nn.Linear ) )
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Union[str, Any] = model_class(_A )
lowercase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : List[Any] = [*signature.parameters.keys()]
lowercase : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __a ( self : int ) -> int:
"""simple docstring"""
lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_A )
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def __a ( self : Union[str, Any] , _A : List[Any] , _A : List[str] , _A : str=False ) -> int:
"""simple docstring"""
lowercase : Optional[int] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def __a ( self : int ) -> Dict:
"""simple docstring"""
if not self.model_tester.is_training:
return
lowercase , lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Union[str, Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_A )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
lowercase : Optional[Any] = model_class(_A )
model.to(_A )
model.train()
lowercase : Optional[int] = self._prepare_for_class(_A , _A , return_labels=_A )
lowercase : List[Any] = model(**_A ).loss
loss.backward()
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase , lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase : List[Any] = False
lowercase : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
lowercase : List[str] = model_class(_A )
model.gradient_checkpointing_enable()
model.to(_A )
model.train()
lowercase : Optional[Any] = self._prepare_for_class(_A , _A , return_labels=_A )
lowercase : Tuple = model(**_A ).loss
loss.backward()
def __a ( self : Optional[int] ) -> Dict:
"""simple docstring"""
lowercase , lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Union[str, Any] = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_A ),
*get_values(_A ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}""" ):
lowercase : List[Any] = problem_type['''title''']
lowercase : List[Any] = problem_type['''num_labels''']
lowercase : Optional[int] = model_class(_A )
model.to(_A )
model.train()
lowercase : List[Any] = self._prepare_for_class(_A , _A , return_labels=_A )
if problem_type["num_labels"] > 1:
lowercase : Optional[Any] = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
lowercase : Union[str, Any] = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_A ) as warning_list:
lowercase : List[str] = model(**_A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def __a ( self : Any ) -> Optional[int]:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : str = DeiTModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def snake_case( ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def __a ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : str = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
_A )
lowercase : Any = self.default_image_processor
lowercase : List[str] = prepare_img()
lowercase : str = image_processor(images=_A , return_tensors='''pt''' ).to(_A )
# forward pass
with torch.no_grad():
lowercase : int = model(**_A )
# verify the logits
lowercase : Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _A )
lowercase : Dict = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase : Any = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
lowercase : str = self.default_image_processor
lowercase : List[str] = prepare_img()
lowercase : List[Any] = image_processor(images=_A , return_tensors='''pt''' )
lowercase : List[str] = inputs.pixel_values.to(_A )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase : List[Any] = model(_A ) | 217 |
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class _A ( _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Tuple = AlbertTokenizer
_UpperCamelCase : Dict = AlbertTokenizerFast
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : List[str] = True
def __a ( self : int ) -> int:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase : List[Any] = AlbertTokenizer(_A )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self : List[Any] , _A : Tuple ) -> Any:
"""simple docstring"""
lowercase : int = '''this is a test'''
lowercase : List[str] = '''this is a test'''
return input_text, output_text
def __a ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = '''<pad>'''
lowercase : Any = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def __a ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase : str = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''▁eloquent''' )
self.assertEqual(len(_A ) , 30_000 )
def __a ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def __a ( self : Optional[int] ) -> int:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase : List[Any] = self.get_tokenizer()
lowercase : str = self.get_rust_tokenizer()
lowercase : List[str] = '''I was born in 92000, and this is falsé.'''
lowercase : int = tokenizer.tokenize(_A )
lowercase : List[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
lowercase : List[str] = tokenizer.encode(_A , add_special_tokens=_A )
lowercase : Tuple = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
lowercase : List[Any] = self.get_rust_tokenizer()
lowercase : Dict = tokenizer.encode(_A )
lowercase : List[Any] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def __a ( self : List[str] ) -> Any:
"""simple docstring"""
lowercase : Union[str, Any] = AlbertTokenizer(_A , keep_accents=_A )
lowercase : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_A , ['''▁this''', '''▁is''', '''▁a''', '''▁test'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [48, 25, 21, 1_289] )
lowercase : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_A , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''] )
lowercase : Union[str, Any] = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(_A , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowercase : Any = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''] , )
def __a ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Tuple = AlbertTokenizer(_A )
lowercase : List[str] = tokenizer.encode('''sequence builders''' )
lowercase : int = tokenizer.encode('''multi-sequence build''' )
lowercase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_A )
lowercase : Dict = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def __a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''albert-base-v2''' , revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''' , ) | 217 | 1 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class lowercase__( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self :int ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Dict ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Any ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :str ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE : Dict = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :int ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
SCREAMING_SNAKE_CASE : Union[str, Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE : Dict = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE : Optional[Any] = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Dict ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
SCREAMING_SNAKE_CASE : str = '''fp16'''
self.assertTrue(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
def __lowerCAmelCase ( self :Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
SCREAMING_SNAKE_CASE : Optional[Any] = '''fp16'''
self.assertFalse(is_safetensors_compatible(lowerCamelCase_ , variant=lowerCamelCase_ ) )
| 18 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def __A ( a_ : float , a_ : float , a_ : bool = False )-> list[float]:
'''simple docstring'''
if radian_mode:
return [magnitude * cos(a_ ), magnitude * sin(a_ )]
return [magnitude * cos(radians(a_ ) ), magnitude * sin(radians(a_ ) )]
def __A ( a_ : NDArray[floataa] , a_ : NDArray[floataa] , a_ : float = 10**-1 )-> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE : NDArray[floataa] = cross(a_ , a_ )
SCREAMING_SNAKE_CASE : float = sum(a_ )
return abs(a_ ) < eps
if __name__ == "__main__":
# Test to check if it works
lowerCamelCase__ : Optional[Any] = array(
[
polar_force(7_1_8.4, 180 - 30),
polar_force(8_7_9.5_4, 45),
polar_force(100, -90),
]
)
lowerCamelCase__ : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowerCamelCase__ : Union[str, Any] = array(
[
polar_force(30 * 9.8_1, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
lowerCamelCase__ : Any = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowerCamelCase__ : Union[str, Any] = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
lowerCamelCase__ : Optional[int] = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 18 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class UpperCamelCase ( _UpperCAmelCase ):
__UpperCamelCase = '''marian'''
__UpperCamelCase = ['''past_key_values''']
__UpperCamelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[Any] ,_lowerCAmelCase : str=58_101 ,_lowerCAmelCase : Union[str, Any]=None ,_lowerCAmelCase : Tuple=1_024 ,_lowerCAmelCase : List[Any]=12 ,_lowerCAmelCase : int=4_096 ,_lowerCAmelCase : int=16 ,_lowerCAmelCase : str=12 ,_lowerCAmelCase : List[str]=4_096 ,_lowerCAmelCase : Tuple=16 ,_lowerCAmelCase : List[Any]=0.0 ,_lowerCAmelCase : Any=0.0 ,_lowerCAmelCase : List[Any]=True ,_lowerCAmelCase : Dict=True ,_lowerCAmelCase : Union[str, Any]="gelu" ,_lowerCAmelCase : int=1_024 ,_lowerCAmelCase : Optional[Any]=0.1 ,_lowerCAmelCase : List[Any]=0.0 ,_lowerCAmelCase : Optional[int]=0.0 ,_lowerCAmelCase : str=0.0_2 ,_lowerCAmelCase : Tuple=58_100 ,_lowerCAmelCase : int=False ,_lowerCAmelCase : Any=58_100 ,_lowerCAmelCase : Tuple=0 ,_lowerCAmelCase : Tuple=0 ,_lowerCAmelCase : List[Any]=True ,**_lowerCAmelCase : int ,):
"""simple docstring"""
__snake_case = vocab_size
__snake_case = decoder_vocab_size or vocab_size
__snake_case = max_position_embeddings
__snake_case = d_model
__snake_case = encoder_ffn_dim
__snake_case = encoder_layers
__snake_case = encoder_attention_heads
__snake_case = decoder_ffn_dim
__snake_case = decoder_layers
__snake_case = decoder_attention_heads
__snake_case = dropout
__snake_case = attention_dropout
__snake_case = activation_dropout
__snake_case = activation_function
__snake_case = init_std
__snake_case = encoder_layerdrop
__snake_case = decoder_layerdrop
__snake_case = use_cache
__snake_case = encoder_layers
__snake_case = scale_embedding # scale factor will be sqrt(d_model) if True
__snake_case = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=_lowercase ,eos_token_id=_lowercase ,is_encoder_decoder=_lowercase ,decoder_start_token_id=_lowercase ,forced_eos_token_id=_lowercase ,**_lowercase ,)
class UpperCamelCase ( _UpperCAmelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__snake_case = {0: '''batch'''}
__snake_case = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
__snake_case = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_lowercase ,direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__snake_case = self.num_layers
for i in range(_lowercase ):
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__snake_case = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def UpperCamelCase_ ( self : Tuple ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__snake_case = super().outputs
else:
__snake_case = super(_lowercase ,self ).outputs
if self.use_past:
__snake_case = self.num_layers
for i in range(_lowercase ):
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
__snake_case = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def UpperCamelCase_ ( self : Optional[Any] ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,):
"""simple docstring"""
__snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase )
# Generate decoder inputs
__snake_case = seq_length if not self.use_past else 1
__snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase )
__snake_case = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
__snake_case = dict(**_lowercase ,**_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__snake_case = common_inputs['''input_ids'''].shape
__snake_case = common_inputs['''decoder_input_ids'''].shape[1]
__snake_case = self.num_attention_heads
__snake_case = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case = decoder_seq_length + 3
__snake_case = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__snake_case = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(_lowercase ,_lowercase )] ,dim=1 )
__snake_case = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__snake_case = self.num_layers
__snake_case = min(_lowercase ,_lowercase )
__snake_case = max(_lowercase ,_lowercase ) - min_num_layers
__snake_case = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(_lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
torch.zeros(_lowercase ),
) )
# TODO: test this.
__snake_case = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(_lowercase ,_lowercase ):
common_inputs["past_key_values"].append((torch.zeros(_lowercase ), torch.zeros(_lowercase )) )
return common_inputs
def UpperCamelCase_ ( self : int ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,):
"""simple docstring"""
__snake_case = self._generate_dummy_inputs_for_encoder_and_decoder(
_lowercase ,_lowercase ,_lowercase ,_lowercase ,_lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__snake_case = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__snake_case = seqlen + 2
__snake_case = self.num_layers
__snake_case = self.num_attention_heads
__snake_case = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__snake_case = common_inputs['''attention_mask'''].dtype
__snake_case = torch.cat(
[common_inputs["attention_mask"], torch.ones(_lowercase ,_lowercase ,dtype=_lowercase )] ,dim=1 )
__snake_case = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(_lowercase )
]
return common_inputs
def UpperCamelCase_ ( self : Optional[Any] ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,):
"""simple docstring"""
__snake_case = compute_effective_axis_dimension(
_lowercase ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__snake_case = tokenizer.num_special_tokens_to_add(_lowercase )
__snake_case = compute_effective_axis_dimension(
_lowercase ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=_lowercase )
# Generate dummy inputs according to compute batch and sequence
__snake_case = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__snake_case = dict(tokenizer(_lowercase ,return_tensors=_lowercase ) )
return common_inputs
def UpperCamelCase_ ( self : List[str] ,_lowerCAmelCase : PreTrainedTokenizer ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : int = -1 ,_lowerCAmelCase : bool = False ,_lowerCAmelCase : Optional[TensorType] = None ,):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__snake_case = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_lowercase ,batch_size=_lowercase ,seq_length=_lowercase ,is_pair=_lowercase ,framework=_lowercase )
else:
__snake_case = self._generate_dummy_inputs_for_causal_lm(
_lowercase ,batch_size=_lowercase ,seq_length=_lowercase ,is_pair=_lowercase ,framework=_lowercase )
return common_inputs
def UpperCamelCase_ ( self : str ,_lowerCAmelCase : Union[str, Any] ,_lowerCAmelCase : Any ,_lowerCAmelCase : Optional[int] ,_lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
__snake_case = super()._flatten_past_key_values_(_lowercase ,_lowercase ,_lowercase ,_lowercase )
else:
__snake_case = super(_lowercase ,self )._flatten_past_key_values_(
_lowercase ,_lowercase ,_lowercase ,_lowercase )
@property
def UpperCamelCase_ ( self : Dict ):
"""simple docstring"""
return 1E-4
| 524 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def a ( A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 35 | 0 |
from torch import nn
class UpperCamelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase):
super().__init__()
lowerCAmelCase_ = class_size
lowerCAmelCase_ = embed_size
# self.mlp1 = nn.Linear(embed_size, embed_size)
# self.mlp2 = (nn.Linear(embed_size, class_size))
lowerCAmelCase_ = nn.Linear(_UpperCAmelCase , _UpperCAmelCase)
def lowercase__ ( self , _UpperCAmelCase):
# hidden_state = nn.functional.relu(self.mlp1(hidden_state))
# hidden_state = self.mlp2(hidden_state)
lowerCAmelCase_ = self.mlp(_UpperCAmelCase)
return logits
| 413 |
def lowerCamelCase_ ( ):
"""simple docstring"""
return [
a * b * (10_00 - a - b)
for a in range(1 , 9_99 )
for b in range(A , 9_99 )
if (a * a + b * b == (10_00 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 413 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__A : Tuple = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
_A = {}
state_dict.pop('pixel_mean' , _SCREAMING_SNAKE_CASE )
state_dict.pop('pixel_std' , _SCREAMING_SNAKE_CASE )
_A = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_A = key.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if re.match(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_A = int(re.match(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).group(2 ) )
if layer_nb == 0:
_A = key.replace('layers.0' , 'proj_in' )
elif layer_nb == 1:
_A = key.replace('layers.1' , 'layers.0' )
elif layer_nb == 2:
_A = key.replace('layers.2' , 'proj_out' )
_A = value
_A = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="ybelkada/segment-anything" ) -> Optional[int]:
"""simple docstring"""
_A = hf_hub_download(_SCREAMING_SNAKE_CASE , F"checkpoints/{model_name}.pth" )
if "sam_vit_b" in model_name:
_A = SamConfig()
elif "sam_vit_l" in model_name:
_A = SamVisionConfig(
hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
_A = SamConfig(
vision_config=_SCREAMING_SNAKE_CASE , )
elif "sam_vit_h" in model_name:
_A = SamVisionConfig(
hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
_A = SamConfig(
vision_config=_SCREAMING_SNAKE_CASE , )
_A = torch.load(_SCREAMING_SNAKE_CASE , map_location='cpu' )
_A = replace_keys(_SCREAMING_SNAKE_CASE )
_A = SamImageProcessor()
_A = SamProcessor(image_processor=_SCREAMING_SNAKE_CASE )
_A = SamModel(_SCREAMING_SNAKE_CASE )
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
_A = hf_model.to('cuda' )
_A = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
_A = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw ).convert('RGB' )
_A = [[[400, 650]]]
_A = [[1]]
_A = processor(images=np.array(_SCREAMING_SNAKE_CASE ) , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_A = hf_model(**_SCREAMING_SNAKE_CASE )
_A = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
_A = processor(
images=np.array(_SCREAMING_SNAKE_CASE ) , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_A = hf_model(**_SCREAMING_SNAKE_CASE )
_A = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
_A = ((75, 275, 1_725, 850),)
_A = processor(images=np.array(_SCREAMING_SNAKE_CASE ) , input_boxes=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_A = hf_model(**_SCREAMING_SNAKE_CASE )
_A = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
_A = [[[400, 650], [800, 650]]]
_A = [[1, 1]]
_A = processor(
images=np.array(_SCREAMING_SNAKE_CASE ) , input_points=_SCREAMING_SNAKE_CASE , input_labels=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).to('cuda' )
with torch.no_grad():
_A = hf_model(**_SCREAMING_SNAKE_CASE )
_A = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
__A : List[str] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__A : Union[str, Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 27 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def __UpperCAmelCase( lowercase_ ):
# vision encoder
if "img_encoder.pos_embed" in name:
_lowerCamelCase : Tuple = name.replace('''img_encoder.pos_embed''' , '''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
_lowerCamelCase : List[str] = name.replace('''img_encoder.patch_embed.proj''' , '''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
_lowerCamelCase : Dict = name.replace('''img_encoder.patch_embed.norm''' , '''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
_lowerCamelCase : Dict = name.replace('''img_encoder.layers''' , '''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
_lowerCamelCase : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "attn" in name and "pre_assign" not in name:
_lowerCamelCase : Tuple = name.replace('''attn''' , '''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
_lowerCamelCase : Optional[int] = name.replace('''proj''' , '''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
_lowerCamelCase : List[Any] = name.replace('''pre_assign_attn.attn.proj''' , '''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
_lowerCamelCase : int = name.replace('''norm1''' , '''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
_lowerCamelCase : Optional[int] = name.replace('''norm2''' , '''layer_norm2''' )
if "img_encoder.norm" in name:
_lowerCamelCase : Dict = name.replace('''img_encoder.norm''' , '''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
_lowerCamelCase : List[Any] = name.replace('''text_encoder.token_embedding''' , '''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
_lowerCamelCase : Optional[Any] = name.replace('''text_encoder.positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
_lowerCamelCase : Dict = name.replace('''text_encoder.transformer.resblocks.''' , '''text_model.encoder.layers.''' )
if "ln_1" in name:
_lowerCamelCase : Tuple = name.replace('''ln_1''' , '''layer_norm1''' )
if "ln_2" in name:
_lowerCamelCase : List[str] = name.replace('''ln_2''' , '''layer_norm2''' )
if "c_fc" in name:
_lowerCamelCase : List[str] = name.replace('''c_fc''' , '''fc1''' )
if "c_proj" in name:
_lowerCamelCase : str = name.replace('''c_proj''' , '''fc2''' )
if "text_encoder" in name:
_lowerCamelCase : Tuple = name.replace('''text_encoder''' , '''text_model''' )
if "ln_final" in name:
_lowerCamelCase : str = name.replace('''ln_final''' , '''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
_lowerCamelCase : str = name.replace('''img_projector.linear_hidden.''' , '''visual_projection.''' )
if "img_projector.linear_out." in name:
_lowerCamelCase : Union[str, Any] = name.replace('''img_projector.linear_out.''' , '''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
_lowerCamelCase : List[str] = name.replace('''text_projector.linear_hidden''' , '''text_projection''' )
if "text_projector.linear_out" in name:
_lowerCamelCase : Any = name.replace('''text_projector.linear_out''' , '''text_projection.3''' )
return name
def __UpperCAmelCase( lowercase_ , lowercase_ ):
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Optional[int] = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Tuple = key.split('''.''' )
_lowerCamelCase, _lowerCamelCase : Dict = int(key_split[2] ), int(key_split[4] )
_lowerCamelCase : Optional[int] = config.vision_config.hidden_size
if "weight" in key:
_lowerCamelCase : List[Any] = val[:dim, :]
_lowerCamelCase : List[Any] = val[dim : dim * 2, :]
_lowerCamelCase : Optional[Any] = val[-dim:, :]
else:
_lowerCamelCase : Union[str, Any] = val[:dim]
_lowerCamelCase : Optional[Any] = val[dim : dim * 2]
_lowerCamelCase : Any = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
_lowerCamelCase : Optional[int] = key.split('''.''' )
_lowerCamelCase : Optional[Any] = int(key_split[3] )
_lowerCamelCase : Union[str, Any] = config.text_config.hidden_size
if "weight" in key:
_lowerCamelCase : Any = val[:dim, :]
_lowerCamelCase : Optional[Any] = val[
dim : dim * 2, :
]
_lowerCamelCase : Dict = val[-dim:, :]
else:
_lowerCamelCase : List[Any] = val[:dim]
_lowerCamelCase : Tuple = val[dim : dim * 2]
_lowerCamelCase : str = val[-dim:]
else:
_lowerCamelCase : Optional[Any] = rename_key(lowercase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
_lowerCamelCase : List[str] = val.squeeze_()
else:
_lowerCamelCase : Tuple = val
return orig_state_dict
def __UpperCAmelCase( ):
_lowerCamelCase : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_lowerCamelCase : Any = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase( lowercase_ , lowercase_ , lowercase_="groupvit-gcc-yfcc" , lowercase_=False ):
_lowerCamelCase : Optional[Any] = GroupViTConfig()
_lowerCamelCase : Any = GroupViTModel(lowercase_ ).eval()
_lowerCamelCase : Optional[Any] = torch.load(lowercase_ , map_location='''cpu''' )['''model''']
_lowerCamelCase : List[str] = convert_state_dict(lowercase_ , lowercase_ )
_lowerCamelCase, _lowerCamelCase : List[str] = model.load_state_dict(lowercase_ , strict=lowercase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowercase_ ) == 0)
# verify result
_lowerCamelCase : Optional[Any] = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
_lowerCamelCase : Dict = prepare_img()
_lowerCamelCase : str = processor(text=['''a photo of a cat''', '''a photo of a dog'''] , images=lowercase_ , padding=lowercase_ , return_tensors='''pt''' )
with torch.no_grad():
_lowerCamelCase : Optional[int] = model(**lowercase_ )
if model_name == "groupvit-gcc-yfcc":
_lowerCamelCase : Dict = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
_lowerCamelCase : Tuple = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , lowercase_ , atol=1e-3 )
processor.save_pretrained(lowercase_ )
model.save_pretrained(lowercase_ )
print('''Successfully saved processor and model to''' , lowercase_ )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowercase_ , organization='''nielsr''' )
model.push_to_hub(lowercase_ , organization='''nielsr''' )
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
_lowerCamelCase = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 114 | 0 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int):
if not isinstance(lowerCamelCase , lowerCamelCase):
raise ValueError("""check_bouncy() accepts only integer arguments""")
A_ : List[str] = str(lowerCamelCase)
A_ : Union[str, Any] = """""".join(sorted(lowerCamelCase))
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def lowerCamelCase ( lowerCamelCase : float = 99):
if not 0 < percent < 100:
raise ValueError("""solution() only accepts values from 0 to 100""")
A_ : Optional[int] = 0
A_ : int = 1
while True:
if check_bouncy(lowerCamelCase):
bouncy_num += 1
if (bouncy_num / num) * 100 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(99)}""")
| 27 |
'''simple docstring'''
import baseaa
def lowerCamelCase ( lowerCamelCase : str):
return baseaa.aaaencode(string.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowerCAmelCase : int = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
_lowerCAmelCase : Union[str, Any] = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
_lowerCAmelCase : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class UpperCAmelCase_ :
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the training data.'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(default=_UpperCamelCase , metadata={'help': 'A folder containing the validation data.'} )
__SCREAMING_SNAKE_CASE : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__SCREAMING_SNAKE_CASE : int = field(default=3_2 , metadata={'help': 'The size of the square patches to use for masking.'} )
__SCREAMING_SNAKE_CASE : float = field(
default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def snake_case_ ( self : str ):
_UpperCAmelCase : List[str] = {}
if self.train_dir is not None:
_UpperCAmelCase : Optional[int] = self.train_dir
if self.validation_dir is not None:
_UpperCAmelCase : Union[str, Any] = self.validation_dir
_UpperCAmelCase : str = data_files if data_files else None
@dataclass
class UpperCAmelCase_ :
__SCREAMING_SNAKE_CASE : str = field(
default=_UpperCamelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
'checkpoint identifier on the hub. '
'Don\'t set if you want to train a model from scratch.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_UpperCamelCase )} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_UpperCamelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , )
__SCREAMING_SNAKE_CASE : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__SCREAMING_SNAKE_CASE : str = field(default=_UpperCamelCase , metadata={'help': 'Name or path of preprocessor config.'} )
__SCREAMING_SNAKE_CASE : bool = field(
default=_UpperCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=_UpperCamelCase , metadata={
'help': (
'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
)
} , )
__SCREAMING_SNAKE_CASE : Optional[int] = field(
default=_UpperCamelCase , metadata={'help': 'Stride to use for the encoder.'} , )
class UpperCAmelCase_ :
def __init__( self : Tuple , A : Dict=1_9_2 , A : Tuple=3_2 , A : str=4 , A : Any=0.6 ):
_UpperCAmelCase : Union[str, Any] = input_size
_UpperCAmelCase : str = mask_patch_size
_UpperCAmelCase : Any = model_patch_size
_UpperCAmelCase : List[Any] = mask_ratio
if self.input_size % self.mask_patch_size != 0:
raise ValueError("Input size must be divisible by mask patch size" )
if self.mask_patch_size % self.model_patch_size != 0:
raise ValueError("Mask patch size must be divisible by model patch size" )
_UpperCAmelCase : int = self.input_size // self.mask_patch_size
_UpperCAmelCase : int = self.mask_patch_size // self.model_patch_size
_UpperCAmelCase : List[Any] = self.rand_size**2
_UpperCAmelCase : Any = int(np.ceil(self.token_count * self.mask_ratio ) )
def __call__( self : List[Any] ):
_UpperCAmelCase : Optional[Any] = np.random.permutation(self.token_count )[: self.mask_count]
_UpperCAmelCase : List[str] = np.zeros(self.token_count , dtype=A )
_UpperCAmelCase : Union[str, Any] = 1
_UpperCAmelCase : Union[str, Any] = mask.reshape((self.rand_size, self.rand_size) )
_UpperCAmelCase : Any = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 )
return torch.tensor(mask.flatten() )
def __snake_case ( SCREAMING_SNAKE_CASE__ : str ) -> int:
'''simple docstring'''
_UpperCAmelCase : Dict = torch.stack([example["pixel_values"] for example in examples] )
_UpperCAmelCase : int = torch.stack([example["mask"] for example in examples] )
return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def __snake_case ( ) -> Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mim" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCAmelCase : List[str] = training_args.get_process_log_level()
logger.setLevel(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.set_verbosity(SCREAMING_SNAKE_CASE__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_UpperCAmelCase : Optional[int] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
_UpperCAmelCase : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
_UpperCAmelCase : int = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , SCREAMING_SNAKE_CASE__ ) and data_args.train_val_split > 0.0:
_UpperCAmelCase : int = ds["train"].train_test_split(data_args.train_val_split )
_UpperCAmelCase : List[Any] = split["train"]
_UpperCAmelCase : Any = split["test"]
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : str = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.config_name_or_path , **SCREAMING_SNAKE_CASE__ )
elif model_args.model_name_or_path:
_UpperCAmelCase : Any = AutoConfig.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
else:
_UpperCAmelCase : Union[str, Any] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(f'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(f'New config: {config}' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
if hasattr(SCREAMING_SNAKE_CASE__ , "decoder_type" ):
_UpperCAmelCase : Tuple = "simmim"
# adapt config
_UpperCAmelCase : Any = model_args.image_size if model_args.image_size is not None else config.image_size
_UpperCAmelCase : str = model_args.patch_size if model_args.patch_size is not None else config.patch_size
_UpperCAmelCase : Dict = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
"image_size": model_args.image_size,
"patch_size": model_args.patch_size,
"encoder_stride": model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
_UpperCAmelCase : int = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **SCREAMING_SNAKE_CASE__ )
elif model_args.model_name_or_path:
_UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **SCREAMING_SNAKE_CASE__ )
else:
_UpperCAmelCase : Optional[int] = {
conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
}
_UpperCAmelCase : List[str] = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
_UpperCAmelCase : Tuple = AutoModelForMaskedImageModeling.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
_UpperCAmelCase : int = AutoModelForMaskedImageModeling.from_config(SCREAMING_SNAKE_CASE__ )
if training_args.do_train:
_UpperCAmelCase : Tuple = ds["train"].column_names
else:
_UpperCAmelCase : Union[str, Any] = ds["validation"].column_names
if data_args.image_column_name is not None:
_UpperCAmelCase : Any = data_args.image_column_name
elif "image" in column_names:
_UpperCAmelCase : List[Any] = "image"
elif "img" in column_names:
_UpperCAmelCase : List[Any] = "img"
else:
_UpperCAmelCase : List[Any] = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
_UpperCAmelCase : Dict = Compose(
[
Lambda(lambda SCREAMING_SNAKE_CASE__ : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
_UpperCAmelCase : str = MaskGenerator(
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , )
def preprocess_images(SCREAMING_SNAKE_CASE__ : List[str] ):
_UpperCAmelCase : List[Any] = [transforms(SCREAMING_SNAKE_CASE__ ) for image in examples[image_column_name]]
_UpperCAmelCase : Dict = [mask_generator() for i in range(len(examples[image_column_name] ) )]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
_UpperCAmelCase : Optional[Any] = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(SCREAMING_SNAKE_CASE__ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
_UpperCAmelCase : List[Any] = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(SCREAMING_SNAKE_CASE__ )
# Initialize our trainer
_UpperCAmelCase : Tuple = Trainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=SCREAMING_SNAKE_CASE__ , data_collator=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
_UpperCAmelCase : int = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : Tuple = last_checkpoint
_UpperCAmelCase : Union[str, Any] = trainer.train(resume_from_checkpoint=SCREAMING_SNAKE_CASE__ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_UpperCAmelCase : Optional[int] = trainer.evaluate()
trainer.log_metrics("eval" , SCREAMING_SNAKE_CASE__ )
trainer.save_metrics("eval" , SCREAMING_SNAKE_CASE__ )
# Write model card and (optionally) push to hub
_UpperCAmelCase : Tuple = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "masked-image-modeling",
"dataset": data_args.dataset_name,
"tags": ["masked-image-modeling"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**SCREAMING_SNAKE_CASE__ )
else:
trainer.create_model_card(**SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 289 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCAmelCase : List[Any] = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
_lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 289 | 1 |
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __lowerCamelCase ( A__ : Dict , A__ : str , A__ : str , A__ : Any ) -> int:
if isinstance(A__ , A__ ):
lowerCamelCase_ : str = np.full((len(A__ ), sequence_length, 2) , A__ )
else:
lowerCamelCase_ : Union[str, Any] = np.full((len(A__ ), sequence_length) , A__ )
for i, tensor in enumerate(A__ ):
if padding_side == "right":
if isinstance(A__ , A__ ):
lowerCamelCase_ : List[str] = tensor[:sequence_length]
else:
lowerCamelCase_ : List[str] = tensor[:sequence_length]
else:
if isinstance(A__ , A__ ):
lowerCamelCase_ : List[Any] = tensor[:sequence_length]
else:
lowerCamelCase_ : str = tensor[:sequence_length]
return out_tensor.tolist()
def __lowerCamelCase ( A__ : Tuple ) -> List[Any]:
lowerCamelCase_ : Dict = ord(A__ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
lowerCamelCase_ : int = unicodedata.category(A__ )
if cat.startswith("""P""" ):
return True
return False
@dataclass
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = 42
_a = True
_a = None
_a = None
_a = -100
_a = "pt"
def _lowerCAmelCase ( self : List[Any] , __a : List[str] ) ->List[str]:
import torch
lowerCamelCase_ : str = """label""" if """label""" in features[0].keys() else """labels"""
lowerCamelCase_ : Tuple = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowerCamelCase_ : List[Any] = self.tokenizer.pad(
__a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , )
if labels is None:
return batch
lowerCamelCase_ : Tuple = torch.tensor(batch["""entity_ids"""] ).shape[1]
lowerCamelCase_ : Union[str, Any] = self.tokenizer.padding_side
if padding_side == "right":
lowerCamelCase_ : Any = [
list(__a ) + [self.label_pad_token_id] * (sequence_length - len(__a )) for label in labels
]
else:
lowerCamelCase_ : Any = [
[self.label_pad_token_id] * (sequence_length - len(__a )) + list(__a ) for label in labels
]
lowerCamelCase_ : List[str] = [feature["""ner_tags"""] for feature in features]
lowerCamelCase_ : Tuple = padding_tensor(__a , -1 , __a , __a )
lowerCamelCase_ : int = [feature["""original_entity_spans"""] for feature in features]
lowerCamelCase_ : Optional[Any] = padding_tensor(__a , (-1, -1) , __a , __a )
lowerCamelCase_ : Optional[Any] = {k: torch.tensor(__a , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 171 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
snake_case__ : int = logging.get_logger(__name__)
snake_case__ : List[str] = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class SCREAMING_SNAKE_CASE_ (a__ ):
'''simple docstring'''
_a = "van"
def __init__( self : int , __a : List[Any]=224 , __a : Dict=3 , __a : List[str]=[7, 3, 3, 3] , __a : Any=[4, 2, 2, 2] , __a : str=[64, 128, 320, 512] , __a : Dict=[3, 3, 12, 3] , __a : List[str]=[8, 8, 4, 4] , __a : List[str]="gelu" , __a : Optional[Any]=0.02 , __a : Dict=1e-6 , __a : List[str]=1e-2 , __a : Optional[int]=0.0 , __a : str=0.0 , **__a : Optional[Any] , ) ->str:
super().__init__(**__a )
lowerCamelCase_ : Optional[Any] = image_size
lowerCamelCase_ : List[str] = num_channels
lowerCamelCase_ : Union[str, Any] = patch_sizes
lowerCamelCase_ : List[Any] = strides
lowerCamelCase_ : Union[str, Any] = hidden_sizes
lowerCamelCase_ : Tuple = depths
lowerCamelCase_ : str = mlp_ratios
lowerCamelCase_ : Any = hidden_act
lowerCamelCase_ : Union[str, Any] = initializer_range
lowerCamelCase_ : Union[str, Any] = layer_norm_eps
lowerCamelCase_ : Union[str, Any] = layer_scale_init_value
lowerCamelCase_ : List[str] = drop_path_rate
lowerCamelCase_ : str = dropout_rate
| 171 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = 42
lowerCamelCase = 42
class snake_case_ ( nn.Module ):
'''simple docstring'''
lowerCamelCase = 42
lowerCamelCase = (16, 32, 96, 2_56)
lowerCamelCase = jnp.floataa
def __SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
lowerCamelCase_ : List[str] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
lowerCamelCase_ : Optional[int] = []
for i in range(len(self.block_out_channels ) - 1 ):
lowerCamelCase_ : List[Any] = self.block_out_channels[i]
lowerCamelCase_ : str = self.block_out_channels[i + 1]
lowerCamelCase_ : Tuple = nn.Conv(
__magic_name__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__magic_name__ )
lowerCamelCase_ : List[str] = nn.Conv(
__magic_name__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(__magic_name__ )
lowerCamelCase_ : List[Any] = blocks
lowerCamelCase_ : List[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : str , __magic_name__ : Tuple ) -> Optional[Any]:
lowerCamelCase_ : Dict = self.conv_in(__magic_name__ )
lowerCamelCase_ : Optional[int] = nn.silu(__magic_name__ )
for block in self.blocks:
lowerCamelCase_ : Optional[int] = block(__magic_name__ )
lowerCamelCase_ : List[str] = nn.silu(__magic_name__ )
lowerCamelCase_ : Tuple = self.conv_out(__magic_name__ )
return embedding
@flax_register_to_config
class snake_case_ ( nn.Module , __A , __A ):
'''simple docstring'''
lowerCamelCase = 32
lowerCamelCase = 4
lowerCamelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase = False
lowerCamelCase = (3_20, 6_40, 12_80, 12_80)
lowerCamelCase = 2
lowerCamelCase = 8
lowerCamelCase = None
lowerCamelCase = 12_80
lowerCamelCase = 0.0
lowerCamelCase = False
lowerCamelCase = jnp.floataa
lowerCamelCase = True
lowerCamelCase = 0
lowerCamelCase = "rgb"
lowerCamelCase = (16, 32, 96, 2_56)
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : jax.random.KeyArray ) -> FrozenDict:
# init input tensors
lowerCamelCase_ : Union[str, Any] = (1, self.in_channels, self.sample_size, self.sample_size)
lowerCamelCase_ : Tuple = jnp.zeros(__magic_name__ , dtype=jnp.floataa )
lowerCamelCase_ : Optional[int] = jnp.ones((1,) , dtype=jnp.intaa )
lowerCamelCase_ : Any = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
lowerCamelCase_ : Tuple = (1, 3, self.sample_size * 8, self.sample_size * 8)
lowerCamelCase_ : Dict = jnp.zeros(__magic_name__ , dtype=jnp.floataa )
lowerCamelCase_ , lowerCamelCase_ : Any = jax.random.split(__magic_name__ )
lowerCamelCase_ : Optional[Any] = {"params": params_rng, "dropout": dropout_rng}
return self.init(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )["params"]
def __SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
lowerCamelCase_ : Optional[Any] = self.block_out_channels
lowerCamelCase_ : Dict = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
lowerCamelCase_ : str = self.num_attention_heads or self.attention_head_dim
# input
lowerCamelCase_ : Union[str, Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
lowerCamelCase_ : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
lowerCamelCase_ : Dict = FlaxTimestepEmbedding(__magic_name__ , dtype=self.dtype )
lowerCamelCase_ : List[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
lowerCamelCase_ : Optional[Any] = self.only_cross_attention
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase_ : int = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase_ : str = (num_attention_heads,) * len(self.down_block_types )
# down
lowerCamelCase_ : str = []
lowerCamelCase_ : List[Any] = []
lowerCamelCase_ : Union[str, Any] = block_out_channels[0]
lowerCamelCase_ : Optional[int] = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__magic_name__ )
for i, down_block_type in enumerate(self.down_block_types ):
lowerCamelCase_ : List[Any] = output_channel
lowerCamelCase_ : List[Any] = block_out_channels[i]
lowerCamelCase_ : str = i == len(__magic_name__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
lowerCamelCase_ : List[Any] = FlaxCrossAttnDownBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
lowerCamelCase_ : Union[str, Any] = FlaxDownBlockaD(
in_channels=__magic_name__ , out_channels=__magic_name__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__magic_name__ )
for _ in range(self.layers_per_block ):
lowerCamelCase_ : int = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__magic_name__ )
if not is_final_block:
lowerCamelCase_ : Any = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(__magic_name__ )
lowerCamelCase_ : List[Any] = down_blocks
lowerCamelCase_ : Union[str, Any] = controlnet_down_blocks
# mid
lowerCamelCase_ : int = block_out_channels[-1]
lowerCamelCase_ : Optional[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=__magic_name__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
lowerCamelCase_ : Any = nn.Conv(
__magic_name__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : int , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : float = 1.0 , __magic_name__ : bool = True , __magic_name__ : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
lowerCamelCase_ : int = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
lowerCamelCase_ : Dict = jnp.flip(__magic_name__ , axis=1 )
# 1. time
if not isinstance(__magic_name__ , jnp.ndarray ):
lowerCamelCase_ : Optional[int] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__magic_name__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
lowerCamelCase_ : Any = timesteps.astype(dtype=jnp.floataa )
lowerCamelCase_ : Optional[Any] = jnp.expand_dims(__magic_name__ , 0 )
lowerCamelCase_ : Union[str, Any] = self.time_proj(__magic_name__ )
lowerCamelCase_ : str = self.time_embedding(__magic_name__ )
# 2. pre-process
lowerCamelCase_ : Union[str, Any] = jnp.transpose(__magic_name__ , (0, 2, 3, 1) )
lowerCamelCase_ : Union[str, Any] = self.conv_in(__magic_name__ )
lowerCamelCase_ : Tuple = jnp.transpose(__magic_name__ , (0, 2, 3, 1) )
lowerCamelCase_ : int = self.controlnet_cond_embedding(__magic_name__ )
sample += controlnet_cond
# 3. down
lowerCamelCase_ : List[str] = (sample,)
for down_block in self.down_blocks:
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase_ , lowerCamelCase_ : Dict = down_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train )
else:
lowerCamelCase_ , lowerCamelCase_ : Tuple = down_block(__magic_name__ , __magic_name__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
lowerCamelCase_ : List[str] = self.mid_block(__magic_name__ , __magic_name__ , __magic_name__ , deterministic=not train )
# 5. contronet blocks
lowerCamelCase_ : List[str] = ()
for down_block_res_sample, controlnet_block in zip(__magic_name__ , self.controlnet_down_blocks ):
lowerCamelCase_ : Optional[int] = controlnet_block(__magic_name__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
lowerCamelCase_ : Union[str, Any] = controlnet_down_block_res_samples
lowerCamelCase_ : Tuple = self.controlnet_mid_block(__magic_name__ )
# 6. scaling
lowerCamelCase_ : Any = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__magic_name__ , mid_block_res_sample=__magic_name__ )
| 488 |
class snake_case_ :
'''simple docstring'''
def __init__( self : Tuple , __magic_name__ : Any , __magic_name__ : int , __magic_name__ : List[Any] ) -> Union[str, Any]:
lowerCamelCase_ : Any = name
lowerCamelCase_ : Optional[Any] = value
lowerCamelCase_ : str = weight
def __repr__( self : int ) -> Optional[int]:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
return self.value
def __SCREAMING_SNAKE_CASE ( self : str ) -> int:
return self.name
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
return self.weight
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
return self.value / self.weight
def __a ( __UpperCAmelCase : Any , __UpperCAmelCase : Tuple , __UpperCAmelCase : Any ) -> int:
"""simple docstring"""
lowerCamelCase_ : int = []
for i in range(len(__UpperCAmelCase ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def __a ( __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ : Dict = sorted(__UpperCAmelCase , key=__UpperCAmelCase , reverse=__UpperCAmelCase )
lowerCamelCase_ : List[Any] = []
lowerCamelCase_ , lowerCamelCase_ : Tuple = 0.0, 0.0
for i in range(len(__UpperCAmelCase ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __a ( ) -> List[str]:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 488 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a : str = logging.get_logger(__name__)
__a : Tuple = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class UpperCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
a : Union[str, Any] = '''efficientnet'''
def __init__( self , lowerCamelCase = 3 , lowerCamelCase = 600 , lowerCamelCase = 2.0 , lowerCamelCase = 3.1 , lowerCamelCase = 8 , lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase = [32, 16, 24, 40, 80, 112, 192] , lowerCamelCase = [16, 24, 40, 80, 112, 192, 320] , lowerCamelCase = [] , lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase = 0.25 , lowerCamelCase = "swish" , lowerCamelCase = 2560 , lowerCamelCase = "mean" , lowerCamelCase = 0.02 , lowerCamelCase = 0.0_01 , lowerCamelCase = 0.99 , lowerCamelCase = 0.5 , lowerCamelCase = 0.2 , **lowerCamelCase , ) -> Dict:
"""simple docstring"""
super().__init__(**__SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = num_channels
lowercase__ : List[str] = image_size
lowercase__ : List[Any] = width_coefficient
lowercase__ : Optional[int] = depth_coefficient
lowercase__ : List[Any] = depth_divisor
lowercase__ : Tuple = kernel_sizes
lowercase__ : Dict = in_channels
lowercase__ : Union[str, Any] = out_channels
lowercase__ : Dict = depthwise_padding
lowercase__ : Dict = strides
lowercase__ : Any = num_block_repeats
lowercase__ : List[str] = expand_ratios
lowercase__ : str = squeeze_expansion_ratio
lowercase__ : List[Any] = hidden_act
lowercase__ : Optional[int] = hidden_dim
lowercase__ : Dict = pooling_type
lowercase__ : int = initializer_range
lowercase__ : Union[str, Any] = batch_norm_eps
lowercase__ : Optional[int] = batch_norm_momentum
lowercase__ : List[Any] = dropout_rate
lowercase__ : Union[str, Any] = drop_connect_rate
lowercase__ : str = sum(__SCREAMING_SNAKE_CASE ) * 4
class UpperCAmelCase( __UpperCAmelCase ):
"""simple docstring"""
a : Any = version.parse("""1.11""" )
@property
def __a ( self ) -> Dict:
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def __a ( self ) -> str:
"""simple docstring"""
return 1E-5 | 716 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def snake_case_ ( *SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=2 ) -> Dict:
from .. import __version__
lowercase__ : List[str] = take_from
lowercase__ : Optional[Any] = ()
if not isinstance(args[0] ,SCREAMING_SNAKE_CASE_ ):
lowercase__ : Optional[Any] = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(SCREAMING_SNAKE_CASE_ ).base_version ) >= version.parse(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
F""" version {__version__} is >= {version_name}""" )
lowercase__ : Tuple = None
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(SCREAMING_SNAKE_CASE_ ),)
lowercase__ : Optional[Any] = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
elif hasattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ):
values += (getattr(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ),)
lowercase__ : str = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
elif deprecated_kwargs is None:
lowercase__ : Any = F"""`{attribute}` is deprecated and will be removed in version {version_name}."""
if warning is not None:
lowercase__ : Union[str, Any] = warning + " " if standard_warn else ""
warnings.warn(warning + message ,SCREAMING_SNAKE_CASE_ ,stacklevel=SCREAMING_SNAKE_CASE_ )
if isinstance(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) and len(SCREAMING_SNAKE_CASE_ ) > 0:
lowercase__ : List[str] = inspect.getouterframes(inspect.currentframe() )[1]
lowercase__ : Union[str, Any] = call_frame.filename
lowercase__ : Optional[Any] = call_frame.lineno
lowercase__ : str = call_frame.function
lowercase__ , lowercase__ : List[str] = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return
elif len(SCREAMING_SNAKE_CASE_ ) == 1:
return values[0]
return values | 298 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def snake_case_ (UpperCamelCase : Dict , UpperCamelCase : Optional[Any]=10 ):
'''simple docstring'''
_a = []
for _ in range(UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
return lrs
def snake_case_ (UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple=10 ):
'''simple docstring'''
_a = []
for step in range(UpperCamelCase ):
lrs.append(scheduler.get_lr()[0] )
scheduler.step()
if step == num_steps // 2:
with tempfile.TemporaryDirectory() as tmpdirname:
_a = os.path.join(UpperCamelCase , '''schedule.bin''' )
torch.save(scheduler.state_dict() , UpperCamelCase )
_a = torch.load(UpperCamelCase )
scheduler.load_state_dict(UpperCamelCase )
return lrs
@require_torch
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] ) -> int:
"""simple docstring"""
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertAlmostEqual(lowerCAmelCase_ , lowerCAmelCase_ , delta=lowerCAmelCase_ )
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
_a = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase_ )
_a = torch.tensor([0.4, 0.2, -0.5] )
_a = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_a = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
for _ in range(1_00 ):
_a = criterion(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_a = torch.tensor([0.1, -0.2, -0.1] , requires_grad=lowerCAmelCase_ )
_a = torch.tensor([0.4, 0.2, -0.5] )
_a = nn.MSELoss()
# No warmup, constant schedule, no gradient clipping
_a = Adafactor(
params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=lowerCAmelCase_ , weight_decay=0.0 , relative_step=lowerCAmelCase_ , scale_parameter=lowerCAmelCase_ , warmup_init=lowerCAmelCase_ , )
for _ in range(10_00 ):
_a = criterion(lowerCAmelCase_ , lowerCAmelCase_ )
loss.backward()
optimizer.step()
w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
w.grad.zero_()
self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class A ( unittest.TestCase ):
lowercase_ = nn.Linear(50 ,50 ) if is_torch_available() else None
lowercase_ = AdamW(m.parameters() ,lr=10.0 ) if is_torch_available() else None
lowercase_ = 10
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[int]=None ) -> Optional[int]:
"""simple docstring"""
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
for a, b in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
self.assertAlmostEqual(lowerCAmelCase_ , lowerCAmelCase_ , delta=lowerCAmelCase_ , msg=lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_a = {'''num_warmup_steps''': 2, '''num_training_steps''': 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
_a = {
get_constant_schedule: ({}, [1_0.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0, 1_0.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 8.7_5, 7.5, 6.2_5, 5.0, 3.7_5, 2.5, 1.2_5],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 1_0.0, 9.6_1, 8.5_3, 6.9_1, 5.0, 3.0_8, 1.4_6, 0.3_8],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 1_0.0, 8.5_3, 5.0, 1.4_6, 1_0.0, 8.5_3, 5.0, 1.4_6],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1e-7},
[0.0, 5.0, 1_0.0, 7.6_5_6, 5.6_2_5, 3.9_0_6, 2.5, 1.4_0_6, 0.6_2_5, 0.1_5_6],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 1_0.0, 8.1_6_5, 7.0_7_1, 6.3_2_5, 5.7_7_4, 5.3_4_5, 5.0, 4.7_1_4],
),
}
for scheduler_func, data in scheds.items():
_a , _a = data
_a = scheduler_func(self.optimizer , **lowerCAmelCase_ )
self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
_a = unwrap_schedule(lowerCAmelCase_ , self.num_steps )
self.assertListAlmostEqual(
lowerCAmelCase_ , lowerCAmelCase_ , tol=1e-2 , msg=F'failed for {scheduler_func} in normal scheduler' , )
_a = scheduler_func(self.optimizer , **lowerCAmelCase_ )
if scheduler_func.__name__ != "get_constant_schedule":
LambdaScheduleWrapper.wrap_scheduler(lowerCAmelCase_ ) # wrap to test picklability of the schedule
_a = unwrap_and_save_reload_schedule(lowerCAmelCase_ , self.num_steps )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ , msg=F'failed for {scheduler_func} in save and reload' )
class A :
def __init__( self : Optional[Any] , lowerCAmelCase_ : Tuple ) -> Optional[int]:
"""simple docstring"""
_a = fn
def __call__( self : List[str] , *lowerCAmelCase_ : Optional[int] , **lowerCAmelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
return self.fn(*lowerCAmelCase_ , **lowerCAmelCase_ )
@classmethod
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : List[str] ) -> Tuple:
"""simple docstring"""
_a = list(map(self , scheduler.lr_lambdas ) )
| 22 |
'''simple docstring'''
class A__ :
def __init__( self :List[Any] , SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Any , SCREAMING_SNAKE_CASE :List[str] ) -> List[str]:
'''simple docstring'''
_a : List[str] =None
_a : Optional[Any] =None
_a : str =graph
self._normalize_graph(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : Optional[int] =len(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =None
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :List[str] ) -> Any:
'''simple docstring'''
if sources is int:
_a : Tuple =[sources]
if sinks is int:
_a : Optional[int] =[sinks]
if len(SCREAMING_SNAKE_CASE ) == 0 or len(SCREAMING_SNAKE_CASE ) == 0:
return
_a : Union[str, Any] =sources[0]
_a : Tuple =sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(SCREAMING_SNAKE_CASE ) > 1 or len(SCREAMING_SNAKE_CASE ) > 1:
_a : Tuple =0
for i in sources:
max_input_flow += sum(self.graph[i] )
_a : List[Any] =len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_a : Any =max_input_flow
_a : List[str] =0
_a : List[str] =len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_a : str =max_input_flow
_a : Optional[Any] =size - 1
def __UpperCAmelCase ( self :Optional[int] ) -> Tuple:
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception("""You need to set maximum flow algorithm before.""" )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Dict ) -> int:
'''simple docstring'''
_a : Tuple =algorithm(self )
class A__ :
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :Optional[Any] ) -> Dict:
'''simple docstring'''
_a : List[str] =flow_network
_a : List[Any] =flow_network.verticesCount
_a : str =flow_network.sourceIndex
_a : str =flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_a : List[Any] =flow_network.graph
_a : Optional[int] =False
def __UpperCAmelCase ( self :List[Any] ) -> List[str]:
'''simple docstring'''
if not self.executed:
self._algorithm()
_a : Any =True
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
pass
class A__ ( UpperCAmelCase__ ):
def __init__( self :int , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
# use this to save your result
_a : List[Any] =-1
def __UpperCAmelCase ( self :Dict ) -> Tuple:
'''simple docstring'''
if not self.executed:
raise Exception("""You should execute algorithm before using its result!""" )
return self.maximum_flow
class A__ ( UpperCAmelCase__ ):
def __init__( self :str , SCREAMING_SNAKE_CASE :Tuple ) -> str:
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE )
_a : int =[[0] * self.verticies_count for i in range(self.verticies_count )]
_a : Union[str, Any] =[0] * self.verticies_count
_a : Optional[Any] =[0] * self.verticies_count
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_a : int =self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_a : Tuple =[
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_a : List[Any] =0
while i < len(SCREAMING_SNAKE_CASE ):
_a : Any =vertices_list[i]
_a : str =self.heights[vertex_index]
self.process_vertex(SCREAMING_SNAKE_CASE )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(SCREAMING_SNAKE_CASE ) )
_a : List[str] =0
else:
i += 1
_a : Optional[int] =sum(self.preflow[self.source_index] )
def __UpperCAmelCase ( self :Optional[int] , SCREAMING_SNAKE_CASE :Union[str, Any] ) -> List[str]:
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.relabel(SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
_a : List[str] =min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __UpperCAmelCase ( self :Any , SCREAMING_SNAKE_CASE :Any ) -> List[Any]:
'''simple docstring'''
_a : int =None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_a : Optional[Any] =self.heights[to_index]
if min_height is not None:
_a : Any =min_height + 1
if __name__ == "__main__":
A__: str = [0]
A__: Optional[Any] = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
A__: Tuple = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
A__: Union[str, Any] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
A__: List[str] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}")
| 694 | 0 |
"""simple docstring"""
from math import pow, sqrt
def lowerCAmelCase__ ( *_UpperCamelCase : List[str] ) -> Tuple:
"""simple docstring"""
snake_case = len(lowerCAmelCase_ ) > 0 and all(value > 0.0 for value in values )
return result
def lowerCAmelCase__ ( _UpperCamelCase : Any , _UpperCamelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase_ , lowerCAmelCase_ )
else ValueError('Input Error: Molar mass values must greater than 0.' )
)
def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Any ) -> str:
"""simple docstring"""
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Dict ) -> int:
"""simple docstring"""
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def lowerCAmelCase__ ( _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else ValueError(
'Input Error: Molar mass and effusion rate values must greater than 0.' )
)
| 703 | """simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowerCAmelCase__ ( _UpperCamelCase : Dict[str, torch.Tensor] ) -> Dict[str, torch.Tensor]:
"""simple docstring"""
snake_case = []
snake_case = []
snake_case = []
for rt in rc.restypes:
snake_case = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
snake_case = {name: i for i, name in enumerate(_UpperCamelCase )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 1_4 )
restype_atomaa_to_atomaa_list.append([0] * 3_7 )
restype_atomaa_mask_list.append([0.0] * 1_4 )
snake_case = torch.tensor(
_UpperCamelCase , dtype=torch.intaa , device=protein['aatype'].device , )
snake_case = torch.tensor(
_UpperCamelCase , dtype=torch.intaa , device=protein['aatype'].device , )
snake_case = torch.tensor(
_UpperCamelCase , dtype=torch.floataa , device=protein['aatype'].device , )
snake_case = protein['aatype'].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
snake_case = restype_atomaa_to_atomaa[protein_aatype]
snake_case = restype_atomaa_mask[protein_aatype]
snake_case = residx_atomaa_mask
snake_case = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
snake_case = restype_atomaa_to_atomaa[protein_aatype]
snake_case = residx_atomaa_to_atomaa.long()
# create the corresponding mask
snake_case = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein['aatype'].device )
for restype, restype_letter in enumerate(rc.restypes ):
snake_case = rc.restype_atoa[restype_letter]
snake_case = rc.residue_atoms[restype_name]
for atom_name in atom_names:
snake_case = rc.atom_order[atom_name]
snake_case = 1
snake_case = restype_atomaa_mask[protein_aatype]
snake_case = residx_atomaa_mask
return protein
def lowerCAmelCase__ ( _UpperCamelCase : Dict[str, torch.Tensor] ) -> Dict[str, np.ndarray]:
"""simple docstring"""
snake_case = tree_map(lambda _UpperCamelCase : torch.tensor(_UpperCamelCase , device=batch['aatype'].device ) , _UpperCamelCase , np.ndarray )
snake_case = tensor_tree_map(lambda _UpperCamelCase : np.array(_UpperCamelCase ) , make_atomaa_masks(_UpperCamelCase ) )
return out
| 104 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def __lowerCAmelCase ( A_ : List[str] ) -> Optional[Any]:
__UpperCAmelCase = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__UpperCAmelCase = [1_44, 1_92, 2_40]
__UpperCAmelCase = [16, 32, 64, 96, 1_28, 1_60, 6_40]
elif "mobilevit_xs" in mobilevit_name:
__UpperCAmelCase = [96, 1_20, 1_44]
__UpperCAmelCase = [16, 32, 48, 64, 80, 96, 3_84]
elif "mobilevit_xxs" in mobilevit_name:
__UpperCAmelCase = [64, 80, 96]
__UpperCAmelCase = [16, 16, 24, 48, 64, 80, 3_20]
__UpperCAmelCase = 0.05
__UpperCAmelCase = 2.0
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase = 5_12
__UpperCAmelCase = 16
__UpperCAmelCase = 21
__UpperCAmelCase = "pascal-voc-id2label.json"
else:
__UpperCAmelCase = 10_00
__UpperCAmelCase = "imagenet-1k-id2label.json"
__UpperCAmelCase = "huggingface/label-files"
__UpperCAmelCase = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) )
__UpperCAmelCase = {int(_A ): v for k, v in idalabel.items()}
__UpperCAmelCase = idalabel
__UpperCAmelCase = {v: k for k, v in idalabel.items()}
return config
def __lowerCAmelCase ( A_ : Optional[int] , A_ : Dict=False ) -> Dict:
for i in range(1 , 6 ):
if F'''layer_{i}.''' in name:
__UpperCAmelCase = name.replace(F'''layer_{i}.''' , F'''encoder.layer.{i - 1}.''' )
if "conv_1." in name:
__UpperCAmelCase = name.replace("conv_1." , "conv_stem." )
if ".block." in name:
__UpperCAmelCase = name.replace(".block." , "." )
if "exp_1x1" in name:
__UpperCAmelCase = name.replace("exp_1x1" , "expand_1x1" )
if "red_1x1" in name:
__UpperCAmelCase = name.replace("red_1x1" , "reduce_1x1" )
if ".local_rep.conv_3x3." in name:
__UpperCAmelCase = name.replace(".local_rep.conv_3x3." , ".conv_kxk." )
if ".local_rep.conv_1x1." in name:
__UpperCAmelCase = name.replace(".local_rep.conv_1x1." , ".conv_1x1." )
if ".norm." in name:
__UpperCAmelCase = name.replace(".norm." , ".normalization." )
if ".conv." in name:
__UpperCAmelCase = name.replace(".conv." , ".convolution." )
if ".conv_proj." in name:
__UpperCAmelCase = name.replace(".conv_proj." , ".conv_projection." )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
__UpperCAmelCase = name.replace(F'''.{i}.{j}.''' , F'''.{i}.layer.{j}.''' )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F'''.{i}.{j}.''' in name:
__UpperCAmelCase = name.replace(F'''.{i}.{j}.''' , F'''.{i}.''' )
if "expand_1x1" in name:
__UpperCAmelCase = name.replace("expand_1x1" , "downsampling_layer.expand_1x1" )
if "conv_3x3" in name:
__UpperCAmelCase = name.replace("conv_3x3" , "downsampling_layer.conv_3x3" )
if "reduce_1x1" in name:
__UpperCAmelCase = name.replace("reduce_1x1" , "downsampling_layer.reduce_1x1" )
for i in range(2 , 5 ):
if F'''.global_rep.{i}.weight''' in name:
__UpperCAmelCase = name.replace(F'''.global_rep.{i}.weight''' , ".layernorm.weight" )
if F'''.global_rep.{i}.bias''' in name:
__UpperCAmelCase = name.replace(F'''.global_rep.{i}.bias''' , ".layernorm.bias" )
if ".global_rep." in name:
__UpperCAmelCase = name.replace(".global_rep." , ".transformer." )
if ".pre_norm_mha.0." in name:
__UpperCAmelCase = name.replace(".pre_norm_mha.0." , ".layernorm_before." )
if ".pre_norm_mha.1.out_proj." in name:
__UpperCAmelCase = name.replace(".pre_norm_mha.1.out_proj." , ".attention.output.dense." )
if ".pre_norm_ffn.0." in name:
__UpperCAmelCase = name.replace(".pre_norm_ffn.0." , ".layernorm_after." )
if ".pre_norm_ffn.1." in name:
__UpperCAmelCase = name.replace(".pre_norm_ffn.1." , ".intermediate.dense." )
if ".pre_norm_ffn.4." in name:
__UpperCAmelCase = name.replace(".pre_norm_ffn.4." , ".output.dense." )
if ".transformer." in name:
__UpperCAmelCase = name.replace(".transformer." , ".transformer.layer." )
if ".aspp_layer." in name:
__UpperCAmelCase = name.replace(".aspp_layer." , "." )
if ".aspp_pool." in name:
__UpperCAmelCase = name.replace(".aspp_pool." , "." )
if "seg_head." in name:
__UpperCAmelCase = name.replace("seg_head." , "segmentation_head." )
if "segmentation_head.classifier.classifier." in name:
__UpperCAmelCase = name.replace("segmentation_head.classifier.classifier." , "segmentation_head.classifier." )
if "classifier.fc." in name:
__UpperCAmelCase = name.replace("classifier.fc." , "classifier." )
elif (not base_model) and ("segmentation_head." not in name):
__UpperCAmelCase = "mobilevit." + name
return name
def __lowerCAmelCase ( A_ : Any , A_ : Union[str, Any] , A_ : Optional[Any]=False ) -> Tuple:
if base_model:
__UpperCAmelCase = ""
else:
__UpperCAmelCase = "mobilevit."
for key in orig_state_dict.copy().keys():
__UpperCAmelCase = orig_state_dict.pop(_A )
if key[:8] == "encoder.":
__UpperCAmelCase = key[8:]
if "qkv" in key:
__UpperCAmelCase = key.split("." )
__UpperCAmelCase = int(key_split[0][6:] ) - 1
__UpperCAmelCase = int(key_split[3] )
__UpperCAmelCase = model.get_submodule(F'''{model_prefix}encoder.layer.{layer_num}''' )
__UpperCAmelCase = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__UpperCAmelCase = (
F'''{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'''
)
if "weight" in key:
__UpperCAmelCase = val[:dim, :]
__UpperCAmelCase = val[dim : dim * 2, :]
__UpperCAmelCase = val[-dim:, :]
else:
__UpperCAmelCase = val[:dim]
__UpperCAmelCase = val[dim : dim * 2]
__UpperCAmelCase = val[-dim:]
else:
__UpperCAmelCase = val
return orig_state_dict
def __lowerCAmelCase ( ) -> Union[str, Any]:
__UpperCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__UpperCAmelCase = Image.open(requests.get(_A , stream=_A ).raw )
return im
@torch.no_grad()
def __lowerCAmelCase ( A_ : str , A_ : int , A_ : int , A_ : str=False ) -> Union[str, Any]:
__UpperCAmelCase = get_mobilevit_config(_A )
# load original state_dict
__UpperCAmelCase = torch.load(_A , map_location="cpu" )
# load 🤗 model
if mobilevit_name.startswith("deeplabv3_" ):
__UpperCAmelCase = MobileViTForSemanticSegmentation(_A ).eval()
else:
__UpperCAmelCase = MobileViTForImageClassification(_A ).eval()
__UpperCAmelCase = convert_state_dict(_A , _A )
model.load_state_dict(_A )
# Check outputs on an image, prepared by MobileViTImageProcessor
__UpperCAmelCase = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
__UpperCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" )
__UpperCAmelCase = model(**_A )
__UpperCAmelCase = outputs.logits
if mobilevit_name.startswith("deeplabv3_" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__UpperCAmelCase = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__UpperCAmelCase = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__UpperCAmelCase = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3, :3, :3] , _A , atol=1e-4 )
else:
assert logits.shape == (1, 10_00)
if mobilevit_name == "mobilevit_s":
__UpperCAmelCase = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
__UpperCAmelCase = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
__UpperCAmelCase = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F'''Unknown mobilevit_name: {mobilevit_name}''' )
assert torch.allclose(logits[0, :3] , _A , atol=1e-4 )
Path(_A ).mkdir(exist_ok=_A )
print(F'''Saving model {mobilevit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_A )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_A )
if push_to_hub:
__UpperCAmelCase = {
"mobilevit_s": "mobilevit-small",
"mobilevit_xs": "mobilevit-x-small",
"mobilevit_xxs": "mobilevit-xx-small",
"deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
"deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
"deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
}
print("Pushing to the hub..." )
__UpperCAmelCase = model_mapping[mobilevit_name]
image_processor.push_to_hub(_A , organization="apple" )
model.push_to_hub(_A , organization="apple" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
a_ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 221 | import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
SCREAMING_SNAKE_CASE : List[Any] = get_logger(__name__)
class A_ ( enum.Enum ):
_SCREAMING_SNAKE_CASE = """all_checks"""
_SCREAMING_SNAKE_CASE = """basic_checks"""
_SCREAMING_SNAKE_CASE = """no_checks"""
class A_ ( a_ ):
pass
class A_ ( a_ ):
pass
class A_ ( a_ ):
pass
class A_ ( a_ ):
pass
def __A ( _A , _A , _A=None ):
"""simple docstring"""
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(_A ) - set(_A ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(_A ) - set(_A ) ) )
if len(set(_A ) - set(_A ) ) > 0:
raise UnexpectedDownloadedFile(str(set(_A ) - set(_A ) ) )
__a = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
__a = " for " + verification_name if verification_name is not None else ""
if len(_A ) > 0:
raise NonMatchingChecksumError(
f"""Checksums didn't match{for_verification_name}:\n"""
f"""{bad_urls}\n"""
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class A_ ( a_ ):
pass
class A_ ( a_ ):
pass
class A_ ( a_ ):
pass
class A_ ( a_ ):
pass
def __A ( _A , _A ):
"""simple docstring"""
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(_A ) - set(_A ) ) > 0:
raise ExpectedMoreSplits(str(set(_A ) - set(_A ) ) )
if len(set(_A ) - set(_A ) ) > 0:
raise UnexpectedSplits(str(set(_A ) - set(_A ) ) )
__a = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(_A ) > 0:
raise NonMatchingSplitsSizesError(str(_A ) )
logger.info("All the splits matched successfully." )
def __A ( _A , _A = True ):
"""simple docstring"""
if record_checksum:
__a = shaaaa()
with open(_A , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"" ):
m.update(_A )
__a = m.hexdigest()
else:
__a = None
return {"num_bytes": os.path.getsize(_A ), "checksum": checksum}
def __A ( _A ):
"""simple docstring"""
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 197 | 0 |
lowercase_ = tuple[float, float, float]
lowercase_ = tuple[float, float, float]
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = end_pointa[0] - end_pointa[0]
lowercase__ = end_pointa[1] - end_pointa[1]
lowercase__ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = ab[1] * ac[2] - ab[2] * ac[1] # *i
lowercase__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
lowercase__ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return tuple(round(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for x in vector ) == (0, 0, 0)
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 10 ):
lowercase__ = create_vector(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase__ = create_vector(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return is_zero_vector(get_ad_vectors_cross(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
| 37 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowercase__ = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
lowercase__ = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
lowercase__ = f'''{src_lang}-{tgt_lang}'''
lowercase__ = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowercase__ = os.path.join(SCREAMING_SNAKE_CASE_ , "README.md" )
print(f'''Generating {path}''' )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
# make sure we are under the root of the project
lowercase_ = Path(__file__).resolve().parent.parent.parent
lowercase_ = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
lowercase_ , lowercase_ , lowercase_ = model_name.split("""-""")
lowercase_ = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 37 | 1 |
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCAmelCase_ :
def __snake_case ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : List[Any] =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
snake_case : Dict =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
snake_case : int =UNetaDConditionModel(
sample_size=32, layers_per_block=1, block_out_channels=[32, 64], down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
], mid_block_type='''UNetMidBlock2DSimpleCrossAttn''', up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''], in_channels=3, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='''text''', addition_embed_type_num_heads=2, cross_attention_norm='''group_norm''', resnet_time_scale_shift='''scale_shift''', act_fn='''gelu''', )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
snake_case : Any =DDPMScheduler(
num_train_timesteps=1_000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0001, beta_end=0.02, thresholding=_snake_case, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='''epsilon''', variance_type='''learned_range''', )
torch.manual_seed(0 )
snake_case : List[str] =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __snake_case ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
snake_case : Optional[Any] =TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
snake_case : int =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
snake_case : Optional[int] =UNetaDConditionModel(
sample_size=32, layers_per_block=[1, 2], block_out_channels=[32, 64], down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
], mid_block_type='''UNetMidBlock2DSimpleCrossAttn''', up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''], in_channels=6, out_channels=6, cross_attention_dim=32, encoder_hid_dim=32, attention_head_dim=8, addition_embed_type='''text''', addition_embed_type_num_heads=2, cross_attention_norm='''group_norm''', resnet_time_scale_shift='''scale_shift''', act_fn='''gelu''', class_embed_type='''timestep''', mid_block_scale_factor=1.414, time_embedding_act_fn='''gelu''', time_embedding_dim=32, )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
snake_case : Dict =DDPMScheduler(
num_train_timesteps=1_000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0001, beta_end=0.02, thresholding=_snake_case, dynamic_thresholding_ratio=0.95, sample_max_value=1.0, prediction_type='''epsilon''', variance_type='''learned_range''', )
torch.manual_seed(0 )
snake_case : Tuple =DDPMScheduler(
num_train_timesteps=1_000, beta_schedule='''squaredcos_cap_v2''', beta_start=0.0001, beta_end=0.02, )
torch.manual_seed(0 )
snake_case : Dict =IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __snake_case ( self : int ):
'''simple docstring'''
snake_case : List[Any] =self.get_dummy_components()
snake_case : Dict =self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
snake_case : Optional[int] =self.get_dummy_inputs(_snake_case )
snake_case : Any =inputs['''prompt''']
snake_case : str =inputs['''generator''']
snake_case : Union[str, Any] =inputs['''num_inference_steps''']
snake_case : Dict =inputs['''output_type''']
if "image" in inputs:
snake_case : Union[str, Any] =inputs['''image''']
else:
snake_case : List[str] =None
if "mask_image" in inputs:
snake_case : Optional[int] =inputs['''mask_image''']
else:
snake_case : Union[str, Any] =None
if "original_image" in inputs:
snake_case : Any =inputs['''original_image''']
else:
snake_case : List[Any] =None
snake_case , snake_case : Dict =pipe.encode_prompt(_snake_case )
# inputs with prompt converted to embeddings
snake_case : Union[str, Any] ={
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
snake_case : int =image
if mask_image is not None:
snake_case : List[str] =mask_image
if original_image is not None:
snake_case : Any =original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_snake_case, _snake_case, _snake_case )
snake_case : Any =pipe(**_snake_case )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_snake_case )
snake_case : int =self.pipeline_class.from_pretrained(_snake_case )
pipe_loaded.to(_snake_case )
pipe_loaded.set_progress_bar_config(disable=_snake_case )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_snake_case, _snake_case ) is None, f'''`{optional_component}` did not stay set to None after loading.''', )
snake_case : List[Any] =self.get_dummy_inputs(_snake_case )
snake_case : str =inputs['''generator''']
snake_case : List[str] =inputs['''num_inference_steps''']
snake_case : Tuple =inputs['''output_type''']
# inputs with prompt converted to embeddings
snake_case : Optional[int] ={
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
snake_case : Optional[Any] =image
if mask_image is not None:
snake_case : Tuple =mask_image
if original_image is not None:
snake_case : Any =original_image
snake_case : Optional[int] =pipe_loaded(**_snake_case )[0]
snake_case : str =np.abs(to_np(_snake_case ) - to_np(_snake_case ) ).max()
self.assertLess(_snake_case, 1E-4 )
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
snake_case : List[str] =self.get_dummy_components()
snake_case : Optional[Any] =self.pipeline_class(**_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
snake_case : str =self.get_dummy_inputs(_snake_case )
snake_case : Optional[int] =pipe(**_snake_case )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_snake_case )
snake_case : Union[str, Any] =self.pipeline_class.from_pretrained(_snake_case )
pipe_loaded.to(_snake_case )
pipe_loaded.set_progress_bar_config(disable=_snake_case )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
snake_case : Tuple =self.get_dummy_inputs(_snake_case )
snake_case : int =pipe_loaded(**_snake_case )[0]
snake_case : Optional[Any] =np.abs(to_np(_snake_case ) - to_np(_snake_case ) ).max()
self.assertLess(_snake_case, 1E-4 )
| 349 |
'''simple docstring'''
import math
import qiskit
def _a ( lowerCamelCase_ = 1 , lowerCamelCase_ = 1 , lowerCamelCase_ = 1 ):
if (
isinstance(lowerCamelCase_ , lowerCamelCase_ )
or isinstance(lowerCamelCase_ , lowerCamelCase_ )
or isinstance(lowerCamelCase_ , lowerCamelCase_ )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(lowerCamelCase_ ) != input_a)
or (math.floor(lowerCamelCase_ ) != input_a)
or (math.floor(lowerCamelCase_ ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
snake_case : List[Any] =qiskit.QuantumRegister(4 , '''qr''' )
snake_case : Optional[Any] =qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
snake_case : Any =[input_a, input_a, carry_in]
snake_case : List[str] =qiskit.QuantumCircuit(lowerCamelCase_ , lowerCamelCase_ )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(lowerCamelCase_ ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(lowerCamelCase_ ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(lowerCamelCase_ ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , lowerCamelCase_ ) # measure the last two qbits
snake_case : List[str] =qiskit.Aer.get_backend('''aer_simulator''' )
snake_case : Optional[int] =qiskit.execute(lowerCamelCase_ , lowerCamelCase_ , shots=10_00 )
return job.result().get_counts(lowerCamelCase_ )
if __name__ == "__main__":
print(f"Total sum count for state is: {quantum_full_adder(1, 1, 1)}")
| 349 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = "xmod"
def __init__( self : Union[str, Any], _UpperCAmelCase : str=3_0_5_2_2, _UpperCAmelCase : List[str]=7_6_8, _UpperCAmelCase : str=1_2, _UpperCAmelCase : Tuple=1_2, _UpperCAmelCase : Dict=3_0_7_2, _UpperCAmelCase : List[Any]="gelu", _UpperCAmelCase : List[Any]=0.1, _UpperCAmelCase : Optional[Any]=0.1, _UpperCAmelCase : int=5_1_2, _UpperCAmelCase : Tuple=2, _UpperCAmelCase : Tuple=0.02, _UpperCAmelCase : Union[str, Any]=1E-12, _UpperCAmelCase : Union[str, Any]=1, _UpperCAmelCase : Any=0, _UpperCAmelCase : List[str]=2, _UpperCAmelCase : Optional[Any]="absolute", _UpperCAmelCase : int=True, _UpperCAmelCase : Optional[Any]=None, _UpperCAmelCase : Optional[Any]=False, _UpperCAmelCase : Any=2, _UpperCAmelCase : Optional[int]=False, _UpperCAmelCase : List[str]=True, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : int=("en_XX",), _UpperCAmelCase : Optional[Any]=None, **_UpperCAmelCase : Union[str, Any], ) -> List[Any]:
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase, bos_token_id=_UpperCAmelCase, eos_token_id=_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : str = position_embedding_type
SCREAMING_SNAKE_CASE__ : Dict = use_cache
SCREAMING_SNAKE_CASE__ : Tuple = classifier_dropout
SCREAMING_SNAKE_CASE__ : int = pre_norm
SCREAMING_SNAKE_CASE__ : List[str] = adapter_reduction_factor
SCREAMING_SNAKE_CASE__ : Dict = adapter_layer_norm
SCREAMING_SNAKE_CASE__ : Tuple = adapter_reuse_layer_norm
SCREAMING_SNAKE_CASE__ : Dict = ln_before_adapter
SCREAMING_SNAKE_CASE__ : List[Any] = list(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = default_language
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
@property
def A_ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
else:
SCREAMING_SNAKE_CASE__ : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 157 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ) -> str | Literal[False]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = list(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = list(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if lista[i] != lista[i]:
count += 1
SCREAMING_SNAKE_CASE__ : Any = "_"
if count > 1:
return False
else:
return "".join(SCREAMING_SNAKE_CASE__ )
def _a ( SCREAMING_SNAKE_CASE__ : list[str] ) -> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
while True:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["$"] * len(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
for j in range(i + 1 , len(SCREAMING_SNAKE_CASE__ ) ):
SCREAMING_SNAKE_CASE__ : str = compare_string(binary[i] , binary[j] )
if k is False:
SCREAMING_SNAKE_CASE__ : int = "*"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "*"
temp.append("X" )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
return pi
SCREAMING_SNAKE_CASE__ : List[str] = list(set(SCREAMING_SNAKE_CASE__ ) )
def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Sequence[float] ) -> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for minterm in minterms:
SCREAMING_SNAKE_CASE__ : Optional[int] = ""
for _ in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Any = str(minterm % 2 ) + string
minterm //= 2
temp.append(SCREAMING_SNAKE_CASE__ )
return temp
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = list(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = list(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def _a ( SCREAMING_SNAKE_CASE__ : list[list[int]] , SCREAMING_SNAKE_CASE__ : list[str] ) -> list[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = [0] * len(SCREAMING_SNAKE_CASE__ )
for i in range(len(chart[0] ) ):
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = -1
for j in range(len(SCREAMING_SNAKE_CASE__ ) ):
if chart[j][i] == 1:
count += 1
SCREAMING_SNAKE_CASE__ : List[Any] = j
if count == 1:
SCREAMING_SNAKE_CASE__ : List[str] = 1
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(SCREAMING_SNAKE_CASE__ ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
temp.append(prime_implicants[i] )
while True:
SCREAMING_SNAKE_CASE__ : List[str] = 0
SCREAMING_SNAKE_CASE__ : Optional[Any] = -1
SCREAMING_SNAKE_CASE__ : List[str] = 0
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
SCREAMING_SNAKE_CASE__ : int = chart[i].count(1 )
if count_n > max_n:
SCREAMING_SNAKE_CASE__ : Tuple = count_n
SCREAMING_SNAKE_CASE__ : str = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(SCREAMING_SNAKE_CASE__ ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
def _a ( SCREAMING_SNAKE_CASE__ : list[str] , SCREAMING_SNAKE_CASE__ : list[str] ) -> list[list[int]]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = [[0 for x in range(len(SCREAMING_SNAKE_CASE__ ) )] for x in range(len(SCREAMING_SNAKE_CASE__ ) )]
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prime_implicants[i].count("_" )
for j in range(len(SCREAMING_SNAKE_CASE__ ) ):
if is_for_table(prime_implicants[i] , binary[j] , SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 1
return chart
def _a ( ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = int(input("Enter the no. of variables\n" ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = [
float(SCREAMING_SNAKE_CASE__ )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
SCREAMING_SNAKE_CASE__ : Tuple = decimal_to_binary(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Tuple = check(SCREAMING_SNAKE_CASE__ )
print("Prime Implicants are:" )
print(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = prime_implicant_chart(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : str = selection(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print("Essential Prime Implicants are:" )
print(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 157 | 1 |
class _a :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict ):
A_ = None
A_ = None
A_ = graph
self._normalize_graph(UpperCAmelCase , UpperCAmelCase )
A_ = len(UpperCAmelCase )
A_ = None
def __A ( self : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ):
if sources is int:
A_ = [sources]
if sinks is int:
A_ = [sinks]
if len(UpperCAmelCase ) == 0 or len(UpperCAmelCase ) == 0:
return
A_ = sources[0]
A_ = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(UpperCAmelCase ) > 1 or len(UpperCAmelCase ) > 1:
A_ = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
A_ = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
A_ = max_input_flow
A_ = 0
A_ = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
A_ = max_input_flow
A_ = size - 1
def __A ( self : str ):
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def __A ( self : Tuple , UpperCAmelCase : List[Any] ):
A_ = algorithm(self )
class _a :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] ):
A_ = flow_network
A_ = flow_network.verticesCount
A_ = flow_network.sourceIndex
A_ = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
A_ = flow_network.graph
A_ = False
def __A ( self : Optional[int] ):
if not self.executed:
self._algorithm()
A_ = True
def __A ( self : Dict ):
pass
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : List[Any] ):
super().__init__(UpperCAmelCase )
# use this to save your result
A_ = -1
def __A ( self : Tuple ):
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : Union[str, Any] ):
super().__init__(UpperCAmelCase )
A_ = [[0] * self.verticies_count for i in range(self.verticies_count )]
A_ = [0] * self.verticies_count
A_ = [0] * self.verticies_count
def __A ( self : List[str] ):
A_ = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
A_ = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
A_ = 0
while i < len(UpperCAmelCase ):
A_ = vertices_list[i]
A_ = self.heights[vertex_index]
self.process_vertex(UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(UpperCAmelCase ) )
A_ = 0
else:
i += 1
A_ = sum(self.preflow[self.source_index] )
def __A ( self : List[str] , UpperCAmelCase : Dict ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(UpperCAmelCase , UpperCAmelCase )
self.relabel(UpperCAmelCase )
def __A ( self : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str ):
A_ = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def __A ( self : Optional[Any] , UpperCAmelCase : List[Any] ):
A_ = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
A_ = self.heights[to_index]
if min_height is not None:
A_ = min_height + 1
if __name__ == "__main__":
__a :Tuple = [0]
__a :Tuple = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__a :List[str] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
__a :List[str] = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__a :List[Any] = flow_network.find_maximum_flow()
print(F"maximum flow is {maximum_flow}") | 86 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCamelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class __A ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , a__ , a__=7 , a__=3 , a__=18 , a__=30 , a__=400 , a__=None , a__=True , a__=True , a__=None , ):
"""simple docstring"""
_lowerCamelCase : str = size if size is not None else {'''height''': 20, '''width''': 20}
_lowerCamelCase : int = parent
_lowerCamelCase : Tuple = batch_size
_lowerCamelCase : Union[str, Any] = num_channels
_lowerCamelCase : Tuple = image_size
_lowerCamelCase : Dict = min_resolution
_lowerCamelCase : int = max_resolution
_lowerCamelCase : Dict = size
_lowerCamelCase : Dict = do_normalize
_lowerCamelCase : List[Any] = do_convert_rgb
_lowerCamelCase : Optional[int] = [512, 1024, 2048, 4096]
_lowerCamelCase : Optional[int] = patch_size if patch_size is not None else {'''height''': 16, '''width''': 16}
def __snake_case ( self):
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = '''https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'''
_lowerCamelCase : Union[str, Any] = Image.open(requests.get(a__ , stream=a__).raw).convert('''RGB''')
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" ,)
@require_torch
@require_vision
class __A ( lowerCamelCase__ ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = PixaStructImageProcessor if is_vision_available() else None
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : int = PixaStructImageProcessingTester(self)
@property
def __snake_case ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(a__ , '''do_normalize'''))
self.assertTrue(hasattr(a__ , '''do_convert_rgb'''))
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Any = self.image_processor_tester.prepare_dummy_image()
_lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
_lowerCamelCase : Optional[int] = 2048
_lowerCamelCase : Any = image_processor(a__ , return_tensors='''pt''' , max_patches=a__)
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606) , atol=1e-3 , rtol=1e-3))
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__)
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image)
# Test not batched input
_lowerCamelCase : Optional[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_lowerCamelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=a__).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase : Dict = image_processor(
a__ , return_tensors='''pt''' , max_patches=a__).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCamelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__)
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image)
# Test not batched input
_lowerCamelCase : List[str] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
_lowerCamelCase : Dict = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a__):
_lowerCamelCase : List[Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=a__).flattened_patches
_lowerCamelCase : List[Any] = '''Hello'''
_lowerCamelCase : int = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=a__ , header_text=a__).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase : Dict = image_processor(
a__ , return_tensors='''pt''' , max_patches=a__ , header_text=a__).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_lowerCamelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__)
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray)
_lowerCamelCase : Any = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_lowerCamelCase : List[str] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=a__).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase : Tuple = image_processor(
a__ , return_tensors='''pt''' , max_patches=a__).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__)
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor)
# Test not batched input
_lowerCamelCase : Tuple = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_lowerCamelCase : Optional[int] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=a__).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase : List[str] = image_processor(
a__ , return_tensors='''pt''' , max_patches=a__).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 ,reason="""`Pix2StructImageProcessor` requires `torch>=1.11.0`.""" ,)
@require_torch
@require_vision
class __A ( lowerCamelCase__ ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = PixaStructImageProcessor if is_vision_available() else None
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = PixaStructImageProcessingTester(self , num_channels=4)
_lowerCamelCase : Union[str, Any] = 3
@property
def __snake_case ( self):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : str = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(a__ , '''do_normalize'''))
self.assertTrue(hasattr(a__ , '''do_convert_rgb'''))
def __snake_case ( self):
"""simple docstring"""
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_lowerCamelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__)
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image)
# Test not batched input
_lowerCamelCase : List[Any] = (
(self.image_processor_tester.patch_size['''height'''] * self.image_processor_tester.patch_size['''width'''])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_lowerCamelCase : Union[str, Any] = image_processor(
image_inputs[0] , return_tensors='''pt''' , max_patches=a__).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
_lowerCamelCase : Union[str, Any] = image_processor(
a__ , return_tensors='''pt''' , max_patches=a__).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 114 | 0 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
__SCREAMING_SNAKE_CASE : Any = TypeVar('''T''')
__SCREAMING_SNAKE_CASE : Any = Union[List[T], Tuple[T, ...]]
__SCREAMING_SNAKE_CASE : str = Union[T, List[T], Dict[str, T]]
__SCREAMING_SNAKE_CASE : Optional[int] = Union[str, bytes, os.PathLike]
| 149 |
def snake_case_ ( lowercase__ : int ):
'''simple docstring'''
_lowerCAmelCase =n ** (1 / 3)
return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 149 | 1 |